[
  {
    "library": "pytorch",
    "name": "is_tensor",
    "source_code": "def is_tensor(obj: _Any, /) -> _TypeIs['torch.Tensor']:\n    return isinstance(obj, torch.Tensor)",
    "docstring": "Returns True if is a PyTorch tensor. Note that this function is simply doing ``. Args: obj (object): Object to test Example:: >>> x = torch.tensor([1, 2, 3]) >>> torch.is_tensor(x) True",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:is_tensor arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_count",
    "source_code": "def get_count(self, using):\n    obj = self.clone()\n    return obj.get_aggregation(using, {'__count': Count('*')})['__count']",
    "docstring": "Perform a COUNT() query using the current filter constraints.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:get_count arg:self arg:using arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_metric_summary",
    "source_code": "def get_metric_summary(metric_name):\n    metric = _METRICS_MAPPING[metric_name]\n    result = metric.get_cell().value()\n    if isinstance(metric, monitoring.Sampler):\n        result = _get_metric_histogram(result)\n    return result",
    "docstring": "Get summary for the specified metric.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\metric_utils.py",
    "ast_data": "FunctionDef name:get_metric_summary arg:metric_name arguments arg Assign Assign Call Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_device_backend_autoload_enabled",
    "source_code": "def _is_device_backend_autoload_enabled() -> builtins.bool:\n    return os.getenv('TORCH_DEVICE_BACKEND_AUTOLOAD', '1') == '1'",
    "docstring": "Whether autoloading out-of-the-tree device extensions is enabled. The switch depends on the value of the environment variable . Returns: bool: Whether to enable autoloading the extensions. Enabled by default. Examples: >>> torch._is_device_backend_autoload_enabled() True",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_is_device_backend_autoload_enabled arguments Return return:yes Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_override_gradient_function",
    "source_code": "@tf_contextlib.contextmanager\ndef _override_gradient_function(self, gradient_function_map) -> Iterator[None]:\n    assert not self._gradient_function_map\n    self._gradient_function_map = gradient_function_map\n    try:\n        yield\n    finally:\n        self._gradient_function_map = {}",
    "docstring": "Specify gradient function for the given op type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_override_gradient_function arg:self arg:gradient_function_map arguments arg arg Assign Try Assign"
  },
  {
    "library": "tensorflow",
    "name": "_remove_squeezable_dimensions",
    "source_code": "def _remove_squeezable_dimensions(labels, predictions, weights=None, expected_rank_diff=0):\n    labels, predictions = confusion_matrix.remove_squeezable_dimensions(labels, predictions, expected_rank_diff=expected_rank_diff)\n    if weights is not None:\n        weights = ops.convert_to_tensor(weights)\n        labels_rank = labels.get_shape().ndims\n        weights_shape = weights.get_shape()\n        weights_rank = weights_shape.ndims\n        if labels_rank is not None and weights_rank is not None:\n            rank_diff = weights_rank - labels_rank\n            if rank_diff == 1:\n                weights = array_ops.squeeze(weights, [-1])\n            return (labels, predictions, weights)\n        rank_diff = array_ops.rank(weights) - array_ops.rank(labels)\n        if weights_rank is None or (weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):\n            weights = cond.cond(math_ops.equal(1, rank_diff), lambda: array_ops.squeeze(weights, [-1]), lambda: weights)\n    return (labels, predictions, weights)",
    "docstring": "Internal version of _remove_squeezable_dimensions which handles weights. Squeezes and if their ranks differ from expected by exactly 1. Squeezes if its rank is 1 more than the new rank of This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a whose dimensions match . predictions: Predicted values, a of arbitrary dimensions. weights: Optional weight . It will be squeezed if it's not scalar, and its rank is 1 more than the new rank of . expected_rank_diff: Expected result of . Returns: Tuple of , and , possibly with the last dimension squeezed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py",
    "ast_data": "FunctionDef name:_remove_squeezable_dimensions arg:labels arg:predictions arg:weights arg:expected_rank_diff arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call Assign Call Assign If BoolOp Compare Compare Assign If Compare Assign Call Return return:yes Assign Call Call If BoolOp Compare BoolOp Compare Call Assign Call Call arguments Call arguments Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "QueueProtocol",
    "source_code": "class QueueProtocol(Protocol):\n\n    def push(self, request: Request) -> None:\n        ...\n\n    def pop(self) -> Request | None:\n        ...\n\n    def close(self) -> None:\n        ...\n\n    def __len__(self) -> int:\n        ...",
    "docstring": "Protocol for downstream queues of ``.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\pqueues.py",
    "ast_data": "ClassDef name:QueueProtocol FunctionDef name:push arg:self arg:request arguments arg arg FunctionDef name:pop arg:self arguments arg FunctionDef name:close arg:self arguments arg FunctionDef name:__len__ arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "delay_unpack_hook_nodes",
    "source_code": "def delay_unpack_hook_nodes(self):\n    for node in self.fx_tracer.graph.find_nodes(op='call_function', target=call_hook):\n        if node.kwargs.get('hook_type', None) != 'unpack_hook':\n            continue\n        first_user = min(node.users)\n        first_user.prepend(node)",
    "docstring": "We can delay unpack hooks until they are needed, even later than in the eager autograd engine.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:delay_unpack_hook_nodes arg:self arguments arg For Call If Compare Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_retrieve",
    "source_code": "@abc.abstractmethod\ndef _retrieve(self) -> Callable[..., core.Tensor]:\n    raise NotImplementedError",
    "docstring": "Returns the retrieve function for the optimizer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py",
    "ast_data": "FunctionDef name:_retrieve arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "save",
    "source_code": "def save(self, commit=True):\n    if not commit:\n        self.saved_forms = []\n\n        def save_m2m():\n            for form in self.saved_forms:\n                form.save_m2m()\n        self.save_m2m = save_m2m\n    if self.edit_only:\n        return self.save_existing_objects(commit)\n    else:\n        return self.save_existing_objects(commit) + self.save_new_objects(commit)",
    "docstring": "Save model instances for every form, adding and changing instances as necessary, and return the list of instances.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:save arg:self arg:commit arguments arg arg If Assign FunctionDef name:save_m2m arguments For Call Assign If Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config, custom_objects=None, columns_by_name=None):\n    _check_config_keys(config, cls._fields)\n    kwargs = _standardize_and_copy_config(config)\n    kwargs['dtype'] = dtypes.as_dtype(config['dtype'])\n    return cls(**kwargs)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arg:custom_objects arg:columns_by_name arguments arg arg arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "KeySet",
    "source_code": "class KeySet:\n\n    def __init__(self, keys):\n        self.keys = keys\n\n    def as_dict(self, is_private=False, **params):\n        return {'keys': [k.as_dict(is_private, **params) for k in self.keys]}\n\n    def as_json(self, is_private=False, **params):\n        obj = self.as_dict(is_private, **params)\n        return json_dumps(obj)\n\n    def find_by_kid(self, kid):\n        if kid is None and len(self.keys) == 1:\n            return self.keys[0]\n        for k in self.keys:\n            if k.kid == kid:\n                return k\n        raise ValueError('Invalid JSON Web Key Set')",
    "docstring": "This class represents a JSON Web Key Set.",
    "type": "class",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\key_set.py",
    "ast_data": "ClassDef name:KeySet FunctionDef name:__init__ arg:self arg:keys arguments arg arg Assign FunctionDef name:as_dict arg:self arg:is_private arguments arg arg arg Return return:yes Call FunctionDef name:as_json arg:self arg:is_private arguments arg arg arg Assign Call Return return:yes Call FunctionDef name:find_by_kid arg:self arg:kid arguments arg arg If BoolOp Compare Compare Call Return return:yes For If Compare Return return:yes Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, patch):\n    super().__init__(patch.get_path(), patch.get_transform())\n    self._patch = patch",
    "docstring": "Parameters ---------- patch :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:patch arguments arg arg Call Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "generate_dynamo_fx_repro_string",
    "source_code": "def generate_dynamo_fx_repro_string(gm, args, compiler_name, check_accuracy=False, *, stable_output=False, save_dir=None, command='run'):\n    model_str = NNModuleToString.convert(gm)\n    writer = InputWriter(save_dir, stable_hash=True)\n    for placeholder, arg in zip(fx_placeholder_targets(gm), args):\n        if isinstance(arg, (int, torch.SymInt)):\n            writer.symint(placeholder, arg)\n        elif isinstance(arg, torch.Tensor):\n            writer.tensor(placeholder, arg)\n        else:\n            raise TypeError(f'arg is neither SymInt/int nor torch.Tensor, {arg}')\n    load_args = '\\n'.join(writer.lines())\n    return textwrap.dedent(f\"\\n{generate_env_vars_string(stable_output=stable_output)}\\nfrom math import inf\\nimport torch\\nfrom torch import tensor, device\\nimport torch.fx as fx\\nimport torch._dynamo\\nfrom torch._dynamo.testing import rand_strided\\nfrom torch._dynamo.debug_utils import run_fwd_maybe_bwd\\n\\n{generate_config_string(stable_output=stable_output)}\\n\\n{extra_imports}\\n\\n{model_str}\\nmod = Repro()\\n\\n{load_args}\\n\\nif __name__ == '__main__':\\n    from torch._dynamo.repro.after_dynamo import run_repro\\n    run_repro(mod, load_args, accuracy={check_accuracy!r}, command={command!r},\\n        save_dir={save_dir!r}, autocast={torch.is_autocast_enabled()!r}, backend={compiler_name!r})\\n\")",
    "docstring": "Generate a repro string for backend-agnostic minified version.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\repro\\after_dynamo.py",
    "ast_data": "FunctionDef name:generate_dynamo_fx_repro_string arg:gm arg:args arg:compiler_name arg:check_accuracy arguments arg arg arg arg arg arg arg Assign Call Assign Call For Call Call If Call Call If Call Call Raise Call Assign Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "version",
    "source_code": "@property\ndef version(self) -> tuple[int, int, int]:\n    version = getattr(self.group._v_attrs, 'pandas_version', None)\n    if isinstance(version, str):\n        version_tup = tuple((int(x) for x in version.split('.')))\n        if len(version_tup) == 2:\n            version_tup = version_tup + (0,)\n        assert len(version_tup) == 3\n        return version_tup\n    else:\n        return (0, 0, 0)",
    "docstring": "compute and set our version",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:version arg:self arguments arg Assign Call If Call Assign Call Call Call If Compare Call Assign Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LiftParametersAndBuffersIntoArgsInputStep",
    "source_code": "class LiftParametersAndBuffersIntoArgsInputStep(InputAdaptStep):\n\n    def __init__(self, inputs: tuple[torch.Tensor, ...]) -> None:\n        self.inputs = inputs\n\n    def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n        return ((*model_args, *self.inputs), model_kwargs)",
    "docstring": "Append parameters and buffers to model's positional argument list.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "ClassDef name:LiftParametersAndBuffersIntoArgsInputStep FunctionDef name:__init__ arg:self arg:inputs arguments arg arg Assign FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "table_alias",
    "source_code": "def table_alias(self, table_name, create=False, filtered_relation=None):\n    alias_list = self.table_map.get(table_name)\n    if not create and alias_list:\n        alias = alias_list[0]\n        self.alias_refcount[alias] += 1\n        return (alias, False)\n    if alias_list:\n        alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)\n        alias_list.append(alias)\n    else:\n        alias = filtered_relation.alias if filtered_relation is not None else table_name\n        self.table_map[table_name] = [alias]\n    self.alias_refcount[alias] = 1\n    return (alias, True)",
    "docstring": "Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:table_alias arg:self arg:table_name arg:create arg:filtered_relation arguments arg arg arg arg Assign Call If BoolOp Assign Return return:yes If Assign Call Call Assign Compare Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "A",
    "source_code": "def A(self):\n    if self.data.hour > 11:\n        return _('PM')\n    return _('AM')",
    "docstring": "'AM' or 'PM'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:A arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    return self.categorical_column.transform_feature(transformation_cache, state_manager)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "initialized",
    "source_code": "def initialized(self, response: Response | None=None) -> Any:\n    return self.__dict__.pop('_postinit_reqs')",
    "docstring": "This method must be set as the callback of your last initialization request. See self.init_request() docstring for more info.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\init.py",
    "ast_data": "FunctionDef name:initialized arg:self arg:response arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "spatial_soft_argmax2d",
    "source_code": "def spatial_soft_argmax2d(input: Tensor, temperature: Optional[Tensor]=None, normalized_coordinates: bool=True) -> Tensor:\n    if temperature is None:\n        temperature = tensor(1.0)\n    input_soft: Tensor = spatial_softmax2d(input, temperature)\n    output: Tensor = spatial_expectation2d(input_soft, normalized_coordinates)\n    return output",
    "docstring": "Compute the Spatial Soft-Argmax 2D of a given input heatmap. Args: input: the given heatmap with shape :math:. temperature: factor to apply to input. normalized_coordinates: whether to return the coordinates normalized in the range of :math:. Otherwise, it will return the coordinates in the range of the input shape. Returns: the index of the maximum 2d coordinates of the give map :math:. The output order is x-coord and y-coord. Examples: >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 10., 0.], ... [0., 0., 0.]]]]) >>> spatial_soft_argmax2d(input, normalized_coordinates=False) tensor([[[1.0000, 1.0000]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py",
    "ast_data": "FunctionDef name:spatial_soft_argmax2d arg:input arg:temperature arg:normalized_coordinates arguments arg arg arg If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "sharpness",
    "source_code": "def sharpness(probability: float, magnitude: int) -> OperationBase:\n    magnitudes = linspace(0.1, 1.9, 11)\n    return Sharpness(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()))",
    "docstring": "Return sharpness op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py",
    "ast_data": "FunctionDef name:sharpness arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, opt, reduction=losses.Reduction.MEAN, name='CrossShardOptimizer', group_assignment=None):\n    accepted_reductions = (losses.Reduction.SUM, losses.Reduction.MEAN)\n    if reduction not in accepted_reductions:\n        raise ValueError(f'Argument `reduction` should be one of {accepted_reductions}. Received: {reduction}')\n    if not isinstance(opt, optimizer.Optimizer):\n        raise TypeError(f'CrossShardOptimizer only works with tf.training.Optimizer and not Keras Optimizer. Received: {opt}. If you are using TPUStrategy, Keras Optimizer will sum gradients across replicas.If you want to average your gradients, rescale your loss with: `loss /= global_batch_size`')\n    super(CrossShardOptimizer, self).__init__(False, name)\n    self._opt = opt\n    self._reduction = reduction\n    self._group_assignment = group_assignment",
    "docstring": "Construct a new cross-shard optimizer. Args: opt: An existing to encapsulate. reduction: The reduction to apply to the shard losses. name: Optional name prefix for the operations created when applying gradients. Defaults to \"CrossShardOptimizer\". group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group] which describles how to apply optimizer to subgroups. Raises: ValueError: If reduction is not a valid cross-shard reduction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:opt arg:reduction arg:name arg:group_assignment arguments arg arg arg arg arg Assign If Compare Raise Call If Call Raise Call Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_SymGrad",
    "source_code": "def _SymGrad(op: ops.Operation, out_grads):\n    f_in = [x for x in op.inputs] + out_grads\n    f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]\n    f = attr_value_pb2.NameAttrList()\n    if _IsPartitionedCall(op):\n        f.name = op.get_attr('f').name\n    else:\n        f.name = op.type\n    for k in op.node_def.attr:\n        f.attr[k].CopyFrom(op.node_def.attr[k])\n    in_grads = gen_functional_ops.symbolic_gradient(input=f_in, Tout=f_types, f=f)\n    return in_grads",
    "docstring": "Backprop through a function call node op given its outputs' gradients.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradients_util.py",
    "ast_data": "FunctionDef name:_SymGrad arg:op arg:out_grads arguments arg arg Assign Assign Call Assign Call If Call Assign Call Assign For Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "maybe_increase_iter",
    "source_code": "def maybe_increase_iter(self, bucket):\n    if bucket.is_last():\n        self.iter += 1\n    if self.iter == self.start_powerSGD_iter:\n        logger.info('Start to apply PowerSGD after %s iterations.', self.iter)",
    "docstring": "Track iterations and trigger log message at start of local SGD.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py",
    "ast_data": "FunctionDef name:maybe_increase_iter arg:self arg:bucket arguments arg arg If Call If Compare Call"
  },
  {
    "library": "scipy",
    "name": "LogRankResult",
    "source_code": "@dataclass\nclass LogRankResult:\n    statistic: np.ndarray\n    pvalue: np.ndarray",
    "docstring": "Result object returned by . Attributes ---------- statistic : float ndarray The computed statistic (defined below). Its magnitude is the square root of the magnitude returned by most other logrank test implementations. pvalue : float ndarray The computed p-value of the test.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_survival.py",
    "ast_data": "ClassDef name:LogRankResult"
  },
  {
    "library": "tensorflow",
    "name": "_multiply_gradient",
    "source_code": "def _multiply_gradient(gradient, scale):\n    scale = math_ops.cast(scale, gradient.dtype)\n    if isinstance(gradient, indexed_slices.IndexedSlices):\n        return indexed_slices.IndexedSlices(gradient.values * scale, gradient.indices, dense_shape=gradient.dense_shape)\n    else:\n        return gradient * scale",
    "docstring": "Multiply a (possibly sparse) gradient by the given scale factor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_multiply_gradient arg:gradient arg:scale arguments arg arg Assign Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_chief",
    "source_code": "def _is_chief(self):\n    if not self._cluster_spec or self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]:\n        return True\n    if _TaskType.CHIEF not in self._cluster_spec.jobs and self._task_type == _TaskType.WORKER and (self._task_id == 0):\n        return True\n    return False",
    "docstring": "Return whether the task is the chief worker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:_is_chief arg:self arguments arg If BoolOp Compare Return return:yes If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pygame",
    "name": "install_pacman_package",
    "source_code": "def install_pacman_package(pkg_name):\n    output = subprocess.run(['pacman', '-S', '--noconfirm', pkg_name], capture_output=True, text=True)\n    if output.returncode != 0:\n        logging.error('Error {} while downloading package {}: \\n{}'.format(output.returncode, pkg_name, output.stderr))\n    return output.returncode != 0",
    "docstring": "This installs a package in the current MSYS2 environment Does not download again if the package is already installed and if the version is the latest available in MSYS2",
    "type": "function",
    "file_path": "pygame\\buildconfig\\download_msys2_prebuilt.py",
    "ast_data": "FunctionDef name:install_pacman_package arg:pkg_name arguments arg Assign Call If Compare Call Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "GemmConfig",
    "source_code": "@dataclasses.dataclass\nclass GemmConfig(BaseConfig):\n    group_m: int = 8",
    "docstring": "Gemm configuration used for most backends (CPU, CUDA)",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "ClassDef name:GemmConfig"
  },
  {
    "library": "pandas",
    "name": "_is_type_compatible",
    "source_code": "def _is_type_compatible(a, b) -> bool:\n    is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset))\n    is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset))\n    return is_number(a) and is_number(b) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or com.any_none(a, b)",
    "docstring": "Helper for interval_range to check type compat of start/end/freq.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_is_type_compatible arg:a arg:b arguments arg arg Assign arguments arg Call Assign arguments arg Call Return return:yes BoolOp BoolOp Call Call BoolOp Call Call BoolOp Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n    return pytree.tree_leaves(model_outputs)",
    "docstring": "Flatten the model outputs. Args: model_outputs: The model outputs to flatten. model: The PyTorch model. Returns: A tuple of the flattened model outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "docs",
    "source_code": "@spin.util.extend_command(spin.cmds.meson.docs)\ndef docs(*, parent_callback, **kwargs):\n    kwargs['clean_dirs'] = ['./doc/build/', './doc/source/reference/generated', './doc/source/reference/random/bit_generators/generated', './doc/source/reference/random/generated']\n    cmd = ['towncrier', 'build', '--version', '2.x.y', '--keep', '--draft']\n    p = subprocess.run(cmd, check=True, capture_output=True, text=True)\n    outfile = curdir.parent / 'doc' / 'source' / 'release' / 'notes-towncrier.rst'\n    with open(outfile, 'w') as f:\n        f.write(p.stdout)\n    parent_callback(**kwargs)",
    "docstring": "📖 Build Sphinx documentation By default, SPHINXOPTS=\"-W\", raising errors on warnings. To build without raising on warnings: SPHINXOPTS=\"\" spin docs To list all Sphinx targets: spin docs targets To build another Sphinx target: spin docs TARGET E.g., to build a zipfile of the html docs for distribution: spin docs dist",
    "type": "function",
    "file_path": "numpy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:docs arguments arg arg Assign Assign Assign Call Assign With Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "NotEncodableError",
    "source_code": "class NotEncodableError(Exception):\n    pass",
    "docstring": "Error raised when a coder cannot encode an object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:NotEncodableError"
  },
  {
    "library": "sphinx",
    "name": "VersionRequirementError",
    "source_code": "class VersionRequirementError(SphinxError):\n    category = 'Sphinx version error'",
    "docstring": "Incompatible Sphinx version error.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\errors.py",
    "ast_data": "ClassDef name:VersionRequirementError Assign"
  },
  {
    "library": "numpy",
    "name": "get_libraries",
    "source_code": "def get_libraries(self):\n    return self.libraries[:]",
    "docstring": "List of compiler libraries.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_libraries arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_hunks",
    "source_code": "def parse_hunks(diff: str) -> list[Hunk]:\n    diff_pattern = 'diff --git a/.* b/(.*)\\\\n(?:\\\\w+ file mode \\\\d+\\\\n)?index .*\\\\n--- .*\\\\n\\\\+\\\\+\\\\+ .*\\\\n'\n    hunk_header_pattern = '@@ -\\\\d+,\\\\d+ \\\\+(\\\\d+),(\\\\d+) @@.*\\\\n'\n    raw_per_file_hunks = re.split(diff_pattern, diff)[1:]\n    parsed_hunks = []\n    for file, raw_hunks in batch(raw_per_file_hunks, 2):\n        hunks = re.split(hunk_header_pattern, raw_hunks, re.MULTILINE)[1:]\n        for start, length, body in batch(hunks, 3):\n            lines = body.split('\\n')\n            lines = lines if lines[-1] else lines[:-1]\n            parsed_hunks.append(Hunk(file, int(start), int(length), lines))\n    return parsed_hunks",
    "docstring": "Parses a diff into hunks. Arguments: diff: The raw output of git diff. Returns: A list of Hunks.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\diff_parser.py",
    "ast_data": "FunctionDef name:parse_hunks arg:diff arguments arg Assign Assign Assign Call Assign For Call Assign Call For Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "UnaryPredicate",
    "source_code": "class UnaryPredicate(GEOSFuncFactory):\n    argtypes = [GEOM_PTR]\n    restype = c_byte\n    errcheck = staticmethod(check_predicate)",
    "docstring": "For GEOS unary predicate functions.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\predicates.py",
    "ast_data": "ClassDef name:UnaryPredicate Assign Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "_validate_numeric_only",
    "source_code": "def _validate_numeric_only(self, name: str, numeric_only: bool) -> None:\n    if self._selected_obj.ndim == 1 and numeric_only and (not is_numeric_dtype(self._selected_obj.dtype)):\n        raise NotImplementedError(f'{type(self).__name__}.{name} does not implement numeric_only')",
    "docstring": "Validate numeric_only argument, raising if invalid for the input. Parameters ---------- name : str Name of the operator (kernel). numeric_only : bool Value passed by user.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_validate_numeric_only arg:self arg:name arg:numeric_only arguments arg arg arg If BoolOp Compare Call Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "__imul__",
    "source_code": "def __imul__(self, other):\n    m = getmask(other)\n    if self._mask is nomask:\n        if m is not nomask and m.any():\n            self._mask = make_mask_none(self.shape, self.dtype)\n            self._mask += m\n    elif m is not nomask:\n        self._mask += m\n    other_data = getdata(other)\n    other_data = np.where(self._mask, other_data.dtype.type(1), other_data)\n    self._data.__imul__(other_data)\n    return self",
    "docstring": "Multiply self by other in-place.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__imul__ arg:self arg:other arguments arg arg Assign Call If Compare If BoolOp Compare Call Assign Call If Compare Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "signature_summary",
    "source_code": "def signature_summary(self, default_values=False):\n    summary = f'{self._function_type!r}'\n    if default_values:\n        summary += '\\nDefaults:'\n        if self.default_values:\n            for name, value in self.default_values.items():\n                summary += f'\\n  {name}: {value!r}'\n        else:\n            summary += '\\n  None'\n    return summary",
    "docstring": "Returns a string summarizing this function's signature. Args: default_values: If true, then include default values in the signature. Returns: A .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:signature_summary arg:self arg:default_values arguments arg arg Assign If If For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "record_memory_history",
    "source_code": "def record_memory_history(enabled: Optional[str]='all', stacks: str='python', max_entries: int=0) -> None:\n    if not is_initialized():\n        return\n    torch._C._mtia_recordMemoryHistory(enabled, stacks, max_entries)",
    "docstring": "Enable/Disable the memory profiler on MTIA allocator Args: enabled (all or state, optional) selected device. Returns statistics for the current device, given by current_device(), if device is None (default). stacks (\"python\" or \"cpp\", optional). Select the stack trace to record. max_entries (int, optional). Maximum number of entries to record.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:record_memory_history arg:enabled arg:stacks arg:max_entries arguments arg arg arg If Call Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    msg = ' nnq.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n    assert type(mod) == cls._FLOAT_MODULE, msg\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined.'\n    weight_post_process = mod.qconfig.weight()\n    weight_post_process(mod.weight)\n    assert weight_post_process.dtype == torch.qint8, 'Weight observer must have a dtype of qint8'\n    qweight = _quantize_weight(mod.weight.float(), weight_post_process)\n    qconv = cls(mod.in_channels, mod.out_channels, mod.kernel_size, mod.stride, mod.padding, mod.output_padding, mod.groups, mod.bias is not None, mod.dilation, mod.padding_mode)\n    qconv.set_weight_bias(qweight, mod.bias)\n    if not hasattr(mod, 'activation_post_process') or mod.activation_post_process.dtype == torch.float:\n        return qconv\n    else:\n        act_scale, act_zp = mod.activation_post_process.calculate_qparams()\n        qconv.scale = float(act_scale)\n        qconv.zero_point = int(act_zp)\n        return qconv",
    "docstring": "Creates a quantized module from a float module or qparams_dict. Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\conv.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Compare Call Call Assign Call Call Compare Assign Call Call Assign Call Compare Call If BoolOp Call Compare Return return:yes Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@abstractmethod\ndef fit(self, X, y=None):\n    pass",
    "docstring": "Placeholder for fit. Subclasses should implement this method! Fit the model with X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "mat_struct",
    "source_code": "class mat_struct:\n    pass",
    "docstring": "Placeholder for holding read data from structs. We use instances of this class when the user passes False as a value to the `scipy.io.loadmat` function.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5_params.py",
    "ast_data": "ClassDef name:mat_struct"
  },
  {
    "library": "tensorflow",
    "name": "context_switches",
    "source_code": "@property\ndef context_switches(self):\n    return self._context_switches",
    "docstring": "Returns a stack of context switches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:context_switches arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_full_batch_training_op",
    "source_code": "def _full_batch_training_op(self, inputs, num_clusters, cluster_idx_list, cluster_centers):\n    cluster_sums = []\n    cluster_counts = []\n    epsilon = constant_op.constant(1e-06, dtype=inputs[0].dtype)\n    for inp, cluster_idx in zip(inputs, cluster_idx_list):\n        with ops.colocate_with(inp, ignore_existing=True):\n            cluster_sums.append(math_ops.unsorted_segment_sum(inp, cluster_idx, num_clusters))\n            cluster_counts.append(math_ops.unsorted_segment_sum(array_ops.reshape(array_ops.ones(array_ops.reshape(array_ops.shape(inp)[0], [-1])), [-1, 1]), cluster_idx, num_clusters))\n    with ops.colocate_with(cluster_centers, ignore_existing=True):\n        new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)\n        if self._clusters_l2_normalized():\n            new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)\n    return state_ops.assign(cluster_centers, new_clusters_centers)",
    "docstring": "Creates an op for training for full batch case. Args: inputs: list of input Tensors. num_clusters: an integer Tensor providing the number of clusters. cluster_idx_list: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. cluster_centers: Tensor Ref of cluster centers. Returns: An op for doing an update of mini-batch k-means.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_full_batch_training_op arg:self arg:inputs arg:num_clusters arg:cluster_idx_list arg:cluster_centers arguments arg arg arg arg arg Assign Assign Assign Call For Call With Call Call Call Call Call Call Call Call Call With Call Assign Call Call Call If Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "point_at",
    "source_code": "def point_at(self, t: Union[float, Tensor]) -> Tensor:\n    return self.origin + self.direction * t",
    "docstring": "Get the point at :math: along this line. Args: t: step along the line. Return: tensor with the point. Example: >>> p0 = torch.tensor([0.0, 0.0]) >>> p1 = torch.tensor([1.0, 1.0]) >>> l = ParametrizedLine.through(p0, p1) >>> p2 = l.point_at(0.1)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:point_at arg:self arg:t arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_all_unique_module_fqns",
    "source_code": "def get_all_unique_module_fqns(self) -> set[str]:\n    return set(self.generated_reports.keys())",
    "docstring": "The purpose of this method is to provide a user the set of all module_fqns so that if they wish to use some of the filtering capabilities of the ModelReportVisualizer class, they don't need to manually parse the generated_reports dictionary to get this information. Returns all the unique module fqns present in the reports the ModelReportVisualizer instance was initialized with.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py",
    "ast_data": "FunctionDef name:get_all_unique_module_fqns arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "StringGaugeCell",
    "source_code": "class StringGaugeCell(object):\n    __slots__ = ['_cell']\n\n    def __init__(self, cell):\n        self._cell = cell\n\n    def set(self, value):\n        pywrap_tfe.TFE_MonitoringStringGaugeCellSet(self._cell, value)\n\n    def value(self):\n        with c_api_util.tf_buffer() as buffer_:\n            pywrap_tfe.TFE_MonitoringStringGaugeCellValue(self._cell, buffer_)\n            value = pywrap_tf_session.TF_GetBuffer(buffer_).decode('utf-8')\n        return value",
    "docstring": "A single string value stored in an .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "ClassDef name:StringGaugeCell Assign FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign FunctionDef name:set arg:self arg:value arguments arg arg Call FunctionDef name:value arg:self arguments arg With Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_init_summary_op",
    "source_code": "def _init_summary_op(self, summary_op=USE_DEFAULT):\n    if summary_op is Supervisor.USE_DEFAULT:\n        summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)\n        if summary_op is None:\n            summary_op = _summary.merge_all()\n            if summary_op is not None:\n                ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)\n    self._summary_op = summary_op",
    "docstring": "Initializes summary_op. Args: summary_op: An Operation that returns a Summary for the event logs. If set to USE_DEFAULT, create an op that merges all the summaries.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_init_summary_op arg:self arg:summary_op arguments arg arg If Compare Assign Call If Compare Assign Call If Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_bound_variable",
    "source_code": "def _bound_variable(self, name: str, *args: Any, **kwargs: Any) -> ValueRanges[Any]:\n    from ..bounds import ValueRangeAnalysis\n    from ..select_algorithm import TritonTemplateKernel\n    from .cuda.cuda_kernel import CUDATemplateKernel\n    if isinstance(V.kernel, TritonTemplateKernel):\n        return ValueRanges.unknown()\n    if isinstance(V.kernel, CUDATemplateKernel):\n        return ValueRanges.unknown()\n    fx_node = V.interpreter.current_node\n    if fx_node.target == name and self.kernel.node_to_bounds is not None:\n        assert isinstance(self.kernel.node_to_bounds, dict), type(self.kernel.node_to_bounds)\n        return self.kernel.node_to_bounds.get(fx_node, ValueRanges.unknown())\n    elif config.compute_all_bounds and hasattr(ValueRangeAnalysis, name):\n        if any((s in fx_node.target for s in ('set_indirect', 'reduction', 'scan'))):\n            return ValueRanges.unknown()\n        assert not kwargs\n\n        def arg_to_bound(x: Any) -> Any:\n            if isinstance(x, CSEVariable):\n                return x.bounds\n            elif isinstance(x, sympy.Expr):\n                return bound_sympy(x)\n            else:\n                return x\n        arg_bounds = list(map(arg_to_bound, args))\n        return getattr(self.vr_analysis, name)(*arg_bounds)\n    return ValueRanges.unknown()",
    "docstring": "If the variable comes from an FX node, we forward the bound we have already computed Else, if the variable when codegen'ing another op, we try to compute its bounds",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:_bound_variable arg:self arg:name arguments arg arg arg arg If Call Return return:yes Call If Call Return return:yes Call Assign If BoolOp Compare Compare Call Call Return return:yes Call Call If BoolOp Call If Call Compare Return return:yes Call FunctionDef name:arg_to_bound arg:x arguments arg If Call Return return:yes If Call Return return:yes Call Return return:yes Assign Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "executor_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef executor_scope(e):\n    ctx = context()\n    executor_old = ctx.executor\n    try:\n        ctx.executor = e\n        yield\n    finally:\n        ctx.executor = executor_old",
    "docstring": "Context manager for changing executor for current thread. Args: e: A Executor to execute eager ops under this scope. Setting it to None will switch back to use the default executor for the context. Yields: Context manager for setting the executor for current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:executor_scope arg:e arguments arg Assign Call Assign Try Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_logical_device_configuration",
    "source_code": "@tf_export('config.get_logical_device_configuration', 'config.experimental.get_virtual_device_configuration')\n@deprecation.deprecated_endpoints('config.experimental.get_virtual_device_configuration')\ndef get_logical_device_configuration(device):\n    return context.context().get_logical_device_configuration(device)",
    "docstring": "Get the virtual device configuration for a . Returns the list of objects previously configured by a call to . For example: >>> physical_devices = tf.config.list_physical_devices('CPU') >>> assert len(physical_devices) == 1, \"No CPUs found\" >>> configs = tf.config.get_logical_device_configuration( ... physical_devices[0]) >>> try: ... assert configs is None ... tf.config.set_logical_device_configuration( ... physical_devices[0], ... [tf.config.LogicalDeviceConfiguration(), ... tf.config.LogicalDeviceConfiguration()]) ... configs = tf.config.get_logical_device_configuration( ... physical_devices[0]) ... assert len(configs) == 2 ... except: ... # Cannot modify virtual devices once initialized. ... pass Args: device: to query Returns: List of objects or if no virtual device configuration has been set for this physical device.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_logical_device_configuration arg:device arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "GraphvizSimple",
    "source_code": "class GraphvizSimple(SphinxDirective):\n    has_content = True\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = False\n    option_spec: ClassVar[OptionSpec] = {'alt': directives.unchanged, 'align': align_spec, 'caption': directives.unchanged, 'layout': directives.unchanged, 'graphviz_dot': directives.unchanged, 'name': directives.unchanged, 'class': directives.class_option}\n\n    def run(self) -> list[Node]:\n        node = graphviz()\n        dot_code = '\\n'.join(self.content)\n        node['code'] = f'{self.name} {self.arguments[0]} {{\\n{dot_code}\\n}}\\n'\n        node['options'] = {'docname': self.env.docname}\n        if 'graphviz_dot' in self.options:\n            node['options']['graphviz_dot'] = self.options['graphviz_dot']\n        if 'layout' in self.options:\n            node['options']['graphviz_dot'] = self.options['layout']\n        if 'alt' in self.options:\n            node['alt'] = self.options['alt']\n        if 'align' in self.options:\n            node['align'] = self.options['align']\n        if 'class' in self.options:\n            node['classes'] = self.options['class']\n        if 'caption' not in self.options:\n            self.add_name(node)\n            return [node]\n        else:\n            figure = figure_wrapper(self, node, self.options['caption'])\n            self.add_name(figure)\n            return [figure]",
    "docstring": "Directive to insert arbitrary dot markup.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\graphviz.py",
    "ast_data": "ClassDef name:GraphvizSimple Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Assign Call Assign Call Assign Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Call Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_paths_to_3d_segments_with_codes",
    "source_code": "def _paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):\n    zs = np.broadcast_to(zs, len(paths))\n    segments_codes = [_path_to_3d_segment_with_codes(path, pathz, zdir) for path, pathz in zip(paths, zs)]\n    if segments_codes:\n        segments, codes = zip(*segments_codes)\n    else:\n        segments, codes = ([], [])\n    return (list(segments), list(codes))",
    "docstring": "Convert paths from a collection object to 3D segments with path codes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_paths_to_3d_segments_with_codes arg:paths arg:zs arg:zdir arguments arg arg arg Assign Call Call Assign Call Call If Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_realized",
    "source_code": "def is_realized(self):\n    return True",
    "docstring": "Used by LazyVariableTracker to indicate an unrealized node",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:is_realized arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "read",
    "source_code": "def read(self, wkb):\n    if isinstance(wkb, memoryview):\n        wkb_s = bytes(wkb)\n        return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))\n    elif isinstance(wkb, bytes):\n        return wkb_reader_read_hex(self.ptr, wkb, len(wkb))\n    elif isinstance(wkb, str):\n        wkb_s = wkb.encode()\n        return wkb_reader_read_hex(self.ptr, wkb_s, len(wkb_s))\n    else:\n        raise TypeError",
    "docstring": "Return a _pointer_ to C GEOS Geometry object from the given WKB.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py",
    "ast_data": "FunctionDef name:read arg:self arg:wkb arguments arg arg If Call Assign Call Return return:yes Call Call If Call Return return:yes Call Call If Call Assign Call Return return:yes Call Call Raise"
  },
  {
    "library": "django",
    "name": "delete",
    "source_code": "def delete(self, key, version=None):\n    raise NotImplementedError('subclasses of BaseCache must provide a delete() method')",
    "docstring": "Delete a key from the cache and return whether it succeeded, failing silently.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:delete arg:self arg:key arg:version arguments arg arg arg Raise Call"
  },
  {
    "library": "scrapy",
    "name": "memoizemethod_noargs",
    "source_code": "def memoizemethod_noargs(method: Callable[Concatenate[_SelfT, _P], _T]) -> Callable[Concatenate[_SelfT, _P], _T]:\n    cache: weakref.WeakKeyDictionary[_SelfT, _T] = weakref.WeakKeyDictionary()\n\n    @wraps(method)\n    def new_method(self: _SelfT, *args: _P.args, **kwargs: _P.kwargs) -> _T:\n        if self not in cache:\n            cache[self] = method(self, *args, **kwargs)\n        return cache[self]\n    return new_method",
    "docstring": "Decorator to cache the result of a method (without arguments) using a weak reference to its object",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\python.py",
    "ast_data": "FunctionDef name:memoizemethod_noargs arg:method arguments arg Call FunctionDef name:new_method arg:self arguments arg arg arg If Compare Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_nodes_and_concrete_functions",
    "source_code": "def _initialize_nodes_and_concrete_functions(self):\n    self.nodes = list(self._trackable_objects)\n    self.gradient_functions = []\n    self.gradient_defs = []\n    for obj in self.nodes:\n        if obj in self._saveable_objects_map:\n            for save_fn, restore_fn in self._saveable_objects_map[obj].values():\n                self.node_ids[save_fn] = len(self.nodes)\n                self.nodes.append(save_fn)\n                self.node_ids[restore_fn] = len(self.nodes)\n                self.nodes.append(restore_fn)\n    self.concrete_functions = [obj for obj in self.nodes if isinstance(obj, defun.ConcreteFunction)]",
    "docstring": "Creates graph with nodes for trackable objects and functions. Adds functions for each trackable object to and associated concrete functions to for serialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_initialize_nodes_and_concrete_functions arg:self arguments arg Assign Call Assign Assign For If Compare For Call Assign Call Call Assign Call Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "formatwarning",
    "source_code": "def formatwarning(self, message, category, filename, lineno, line=None):\n    return 'CherryPy Checker:\\n%s\\n\\n' % message",
    "docstring": "Format a warning.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:formatwarning arg:self arg:message arg:category arg:filename arg:lineno arg:line arguments arg arg arg arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_scope_vals",
    "source_code": "def _scope_vals(self, vals):\n    if isinstance(vals, (list, tuple)):\n        return vals\n    elif isinstance(vals, dict):\n        return vals.values()\n    else:\n        return [vals]",
    "docstring": "Return a list of values to pass to . Args: vals: A tensor, a list or tuple of tensors, or a dictionary. Returns: The values in vals as a list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:_scope_vals arg:self arg:vals arguments arg arg If Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_compressed_ids",
    "source_code": "def get_compressed_ids(labels, sizes: Shape) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]:\n    ids = get_group_index(labels, sizes, sort=True, xnull=False)\n    return compress_group_index(ids, sort=True)",
    "docstring": "Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). Parameters ---------- labels : list of label arrays sizes : tuple[int] of size of the levels Returns ------- np.ndarray[np.intp] comp_ids np.ndarray[np.int64] obs_group_ids",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:get_compressed_ids arg:labels arg:sizes arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_calc_dual_canonical_window",
    "source_code": "def _calc_dual_canonical_window(win: np.ndarray, hop: int) -> np.ndarray:\n    if hop > len(win):\n        raise ValueError(f'hop={hop!r} is larger than window length of {len(win)}' + ' => STFT not invertible!')\n    if issubclass(win.dtype.type, np.integer):\n        raise ValueError(\"Parameter 'win' cannot be of integer type, but \" + f'win.dtype={win.dtype!r} => STFT not invertible!')\n    w2 = win.real ** 2 + win.imag ** 2\n    DD = w2.copy()\n    for k_ in range(hop, len(win), hop):\n        DD[k_:] += w2[:-k_]\n        DD[:-k_] += w2[k_:]\n    relative_resolution = np.finfo(win.dtype).resolution * max(DD)\n    if not np.all(DD >= relative_resolution):\n        raise ValueError('Short-time Fourier Transform not invertible!')\n    return win / DD",
    "docstring": "Calculate canonical dual window for 1d window and a time step of samples. A ``.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:_calc_dual_canonical_window arg:win arg:hop arguments arg arg If Compare Call Raise Call Call If Call Raise Call Assign Assign Call For Call Call Assign Call Call If Call Compare Raise Call Return return:yes"
  },
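The overlap-add normalization above is easiest to see outside the library. Below is a minimal NumPy sketch (the periodic Hann window and the sizes are arbitrary choices, not taken from SciPy) that reproduces the `DD` accumulation from the source and checks the property that makes the STFT invertible: within every residue class modulo `hop`, `win * dual` sums to one.

```python
import numpy as np

# Periodic Hann window built by hand; length and hop are arbitrary.
win = 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(128) / 128)
hop = 32

# Same accumulation as in _calc_dual_canonical_window: DD[m] collects
# w2 over all shifts of the window by multiples of hop.
w2 = win**2
DD = w2.copy()
for k_ in range(hop, len(win), hop):
    DD[k_:] += w2[:-k_]
    DD[:-k_] += w2[k_:]

dual = win / DD

# Canonical dual property: for every offset r, the products win*dual
# summed over the hop grid equal one, so overlap-add reconstruction
# recovers the signal exactly.
for r in range(hop):
    assert np.isclose((win[r::hop] * dual[r::hop]).sum(), 1.0)
```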
  {
    "library": "django",
    "name": "all_valid",
    "source_code": "def all_valid(formsets):\n    return all([formset.is_valid() for formset in formsets])",
    "docstring": "Validate every formset and return True if all are valid.",
    "type": "function",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:all_valid arg:formsets arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_images",
    "source_code": "def get_images(self):\n    return cbook.silent_list('AxesImage', self.images)",
    "docstring": "Return a list of \\s contained by the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_images arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "@abc.abstractmethod\ndef public_key(self) -> DSAPublicKey:\n    pass",
    "docstring": "The DSAPublicKey associated with this private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:public_key arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "BinaryAccuracy",
    "source_code": "class BinaryAccuracy(MeanMetricWrapper):\n\n    def __init__(self, name='binary_accuracy', dtype=None, threshold=0.5):\n        super(BinaryAccuracy, self).__init__(binary_accuracy, name, dtype=dtype, threshold=threshold)",
    "docstring": "Calculates how often predictions match binary labels. This metric creates two local variables, and that are used to compute the frequency with which matches . This frequency is ultimately returned as : an idempotent operation that simply divides by . If is , weights default to 1. Use of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Standalone usage: >>> m = tf.keras.metrics.BinaryAccuracy() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]]) >>> m.result().numpy() 0.75 >>> m.reset_state() >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]], ... sample_weight=[1, 0, 0, 1]) >>> m.result().numpy() 0.5 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:BinaryAccuracy FunctionDef name:__init__ arg:self arg:name arg:dtype arg:threshold arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "valid_scalar_name",
    "source_code": "def valid_scalar_name(scalar_name: ScalarName | str) -> bool:\n    return scalar_name in _SCALAR_NAME_TO_TYPE",
    "docstring": "Return whether the given scalar name is a valid JIT scalar type name.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_type_utils.py",
    "ast_data": "FunctionDef name:valid_scalar_name arg:scalar_name arguments arg Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "to_pydatetime",
    "source_code": "def to_pydatetime(self) -> npt.NDArray[np.object_]:\n    return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)",
    "docstring": "Return an ndarray of `` objects. See Also -------- DatetimeIndex.to_julian_date : Converts Datetime Array to float64 ndarray of Julian Dates. Examples -------- >>> idx = pd.date_range(\"2018-02-27\", periods=3) >>> idx.to_pydatetime() array([datetime.datetime(2018, 2, 27, 0, 0), datetime.datetime(2018, 2, 28, 0, 0), datetime.datetime(2018, 3, 1, 0, 0)], dtype=object)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:to_pydatetime arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_sketch_params",
    "source_code": "def set_sketch_params(self, scale=None, length=None, randomness=None):\n    if scale is None:\n        self._sketch = None\n    else:\n        self._sketch = (scale, length or 128.0, randomness or 16.0)\n    self.stale = True",
    "docstring": "Set the sketch parameters. Parameters ---------- scale : float, optional The amplitude of the wiggle perpendicular to the source line, in pixels. If scale is , or not provided, no sketch filter will be provided. length : float, optional The length of the wiggle along the line, in pixels (default 128.0) randomness : float, optional The scale factor by which the length is shrunken or expanded (default 16.0) The PGF backend uses this argument as an RNG seed and not as described above. Using the same seed yields the same random shape. .. ACCEPTS: (scale: float, length: float, randomness: float)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_sketch_params arg:self arg:scale arg:length arg:randomness arguments arg arg arg arg If Compare Assign Assign BoolOp BoolOp Assign"
  },
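A brief usage sketch for the setter above (the file name and figure contents are arbitrary); `Line2D` inherits `set_sketch_params` from `Artist`:

```python
import matplotlib
matplotlib.use("Agg")  # render off-screen
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
(line,) = ax.plot([0, 1, 2], [0, 1, 0])

# 2 px wiggle amplitude; length and randomness fall back to the
# documented defaults (128.0 and 16.0) via the `or` expressions above.
line.set_sketch_params(scale=2)

# Calling with scale=None (or no arguments) removes the sketch filter.
line.set_sketch_params()
fig.savefig("sketch_demo.png")
```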
  {
    "library": "pytorch",
    "name": "_verify_params_per_rank",
    "source_code": "def _verify_params_per_rank(self, params_per_rank: list[list[torch.Tensor]]) -> None:\n    if len(params_per_rank) != self.world_size:\n        raise ValueError('`params_per_rank` must have length equal to the world size')\n    all_params_set = set(self._all_params)\n    for params in params_per_rank:\n        for param in params:\n            if param not in all_params_set:\n                raise ValueError('Passing a new parameter in `params_per_rank` that was not passed into the ZeroRedundancyOptimizer constructor')",
    "docstring": "Verify `_partition_parametersZeroRedundancyOptimizerZeroRedundancyOptimizer` constructor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_verify_params_per_rank arg:self arg:params_per_rank arguments arg arg If Compare Call Raise Call Assign Call For For If Compare Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "header",
    "source_code": "def header(self):\n    return '\\n            <html>\\n            <head>\\n                <title>%s</title>\\n            <head>\\n            <body>\\n            <h2>%s</h2>\\n        ' % (self.title, self.title)",
    "docstring": "Render HTML layout header.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py",
    "ast_data": "FunctionDef name:header arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ensures",
    "source_code": "def ensures(self, graph_module: GraphModule) -> None:\n    pass",
    "docstring": "This function will be called after the pass is run and will check that the given graph module contains the postconditions needed to run the pass. It is not required to implement this function. Args: graph_module: The graph module we will run checks on",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py",
    "ast_data": "FunctionDef name:ensures arg:self arg:graph_module arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "internal_convert_n_to_tensor_or_composite",
    "source_code": "def internal_convert_n_to_tensor_or_composite(values, dtype=None, name=None, as_ref=False) -> list[Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor, type(None)]]:\n    if not isinstance(values, collections_abc.Sequence):\n        raise TypeError('values must be a sequence.')\n    ret = []\n    for i, value in enumerate(values):\n        if value is None:\n            ret.append(value)\n        else:\n            n = None if name is None else '%s_%d' % (name, i)\n            ret.append(internal_convert_to_tensor_or_composite(value, dtype=dtype, name=n, as_ref=as_ref))\n    return ret",
    "docstring": "Converts to a list of or objects. Any objects in are returned unmodified. Args: values: A list of , , or objects that can be consumed by . dtype: (Optional.) The required of the returned s or s. name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . as_ref: True if the caller wants the results as ref tensors. Returns: A list of , , and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:internal_convert_n_to_tensor_or_composite arg:values arg:dtype arg:name arg:as_ref arguments arg arg arg arg If Call Raise Call Assign For Call If Compare Call Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_unbacked_symbool",
    "source_code": "@record_shapeenv_event()\ndef create_unbacked_symbool(self) -> SymBool:\n    symbol: sympy.Symbol = make_symbol(SymT.UNBACKED_INT, next(self.unbacked_symint_counter), integer=True)\n    if not self._ignore_fresh_unbacked_symbols_tls():\n        self.pending_fresh_unbacked_symbols.append(symbol)\n    self.counter['create_unbacked_symbol'] += 1\n    self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)\n    vr = self.var_to_range[symbol] = ValueRanges(0, 1)\n    assert vr.is_int\n    sloc = self._get_sloc('default value range for unbacked SymBool')\n    self.var_to_range_sloc[symbol] = ValueRangesSLoc(sloc, sloc)\n    fx_node = self._create_fx_placeholder_and_z3var(symbol, bool)\n    sym_node = SymNode(sympy.Eq(symbol, 1), self, bool, None, fx_node=fx_node)\n    self._log_create_unbacked_symbol('create_unbacked_symbool', symbol, vr, sym_node=sym_node)\n    return SymBool(sym_node)",
    "docstring": "Create a symbolic boolean without a hint value",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_unbacked_symbool arg:self arguments arg Call Call If Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dtypes, shapes, names, queue_ref):\n    self._dtypes = dtypes\n    if shapes is not None:\n        if len(shapes) != len(dtypes):\n            raise ValueError(f'Queue shapes must have the same length as dtypes, received len(shapes)={len(shapes)}, len(dtypes)={len(dtypes)}')\n        self._shapes = [tensor_shape.TensorShape(s) for s in shapes]\n    else:\n        self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]\n    if names is not None:\n        if len(names) != len(dtypes):\n            raise ValueError(f'Queue names must have the same length as dtypes,received len(names)={len(names)},len {len(dtypes)}')\n        self._names = names\n    else:\n        self._names = None\n    self._queue_ref = queue_ref\n    if isinstance(queue_ref, ops.EagerTensor):\n        if context.context().scope_name:\n            self._name = context.context().scope_name\n        else:\n            self._name = 'Empty'\n        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(queue_ref, None)\n    else:\n        self._name = self._queue_ref.op.name.split('/')[-1]",
    "docstring": "Constructs a queue object from a queue reference. The two optional lists, and , must be of the same length as if provided. The values at a given index indicate the shape and name to use for the corresponding queue component in . Args: dtypes: A list of types. The length of dtypes must equal the number of tensors in each element. shapes: Constraints on the shapes of tensors in an element: A list of shape tuples or None. This list is the same length as dtypes. If the shape of any tensors in the element are constrained, all must be; shapes can be None if the shapes should not be constrained. names: Optional list of names. If provided, the and methods will use dictionaries with these names as keys. Must be None or a list or tuple of the same length as . queue_ref: The queue reference, i.e. the output of the queue op. Raises: ValueError: If one of the arguments is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dtypes arg:shapes arg:names arg:queue_ref arguments arg arg arg arg arg Assign If Compare If Compare Call Call Raise Call Call Call Assign Call Assign Call If Compare If Compare Call Call Raise Call Call Call Assign Assign Assign If Call If Call Assign Call Assign Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_jaccard",
    "source_code": "def _jaccard(a_rows, a_cols, b_rows, b_cols):\n    intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum()\n    a_size = a_rows.sum() * a_cols.sum()\n    b_size = b_rows.sum() * b_cols.sum()\n    return intersection / (a_size + b_size - intersection)",
    "docstring": "Jaccard coefficient on the elements of the two biclusters.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_jaccard arg:a_rows arg:a_cols arg:b_rows arg:b_cols arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
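To make the arithmetic concrete, here is a self-contained copy of the coefficient applied to boolean indicator vectors for two overlapping biclusters on a 4x4 matrix (the indicators are made up for illustration):

```python
import numpy as np

def jaccard(a_rows, a_cols, b_rows, b_cols):
    # A bicluster's size is (#rows) * (#cols); the intersection of two
    # biclusters multiplies the shared-row and shared-column counts.
    intersection = (a_rows * b_rows).sum() * (a_cols * b_cols).sum()
    a_size = a_rows.sum() * a_cols.sum()
    b_size = b_rows.sum() * b_cols.sum()
    return intersection / (a_size + b_size - intersection)

a_rows = np.array([1, 1, 0, 0]); a_cols = np.array([1, 1, 0, 0])
b_rows = np.array([0, 1, 1, 0]); b_cols = np.array([1, 1, 0, 0])
# |A| = 4, |B| = 4, |A ∩ B| = 1 * 2 = 2  =>  2 / (4 + 4 - 2) = 1/3
print(jaccard(a_rows, a_cols, b_rows, b_cols))  # 0.3333...
```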
  {
    "library": "tensorflow",
    "name": "_align_matrices",
    "source_code": "def _align_matrices(x, y):\n    x_matrix = _to_matrix(x)\n    y_matrix = _to_matrix(y)\n    x_shape = x_matrix.shape\n    y_shape = y_matrix.shape\n    if y_shape[1] != x_shape[1]:\n        raise ValueError('The outermost dimensions of the input tensors should match. Given: {} vs {}.'.format(y_shape[1], x_shape[1]))\n    x_tile = array_ops.tile(array_ops.expand_dims(x_matrix, 1), [1, y_shape[0], 1])\n    y_tile = array_ops.tile(array_ops.expand_dims(y_matrix, 0), [x_shape[0], 1, 1])\n    return (x_tile, y_tile)",
    "docstring": "Aligns x and y tensors to allow computations over pairs of their rows.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\kernelized_utils.py",
    "ast_data": "FunctionDef name:_align_matrices arg:x arg:y arguments arg arg Assign Call Assign Call Assign Assign If Compare Raise Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_getdtype",
    "source_code": "@classmethod\ndef _getdtype(cls, val):\n    return np.array(val).dtype",
    "docstring": "Returns the dtype of the input variable.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:_getdtype arg:cls arg:val arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "cy",
    "source_code": "@property\ndef cy(self) -> Tensor:\n    return self.rectified_left_camera[..., 1, 2]",
    "docstring": "Return the y-coordinate of the principal point. Note that the y-coordinate of the principal points is assumed to be equal for the left and right camera. Returns: tensor of shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:cy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_irfft_wrapper",
    "source_code": "def _irfft_wrapper(ifft_fn, fft_rank, default_name):\n\n    def _irfft(input_tensor, fft_length=None, name=None):\n        with _ops.name_scope(name, default_name, [input_tensor, fft_length]) as name:\n            input_tensor = _ops.convert_to_tensor(input_tensor, preferred_dtype=_dtypes.complex64)\n            input_tensor.shape.with_rank_at_least(fft_rank)\n            if input_tensor.dtype not in (_dtypes.complex64, _dtypes.complex128):\n                raise ValueError('IRFFT requires tf.complex64 or tf.complex128 inputs, got: %s' % input_tensor)\n            complex_dtype = input_tensor.dtype\n            real_dtype = complex_dtype.real_dtype\n            if fft_length is None:\n                fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)\n            else:\n                fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)\n            input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=True)\n            fft_length_static = _tensor_util.constant_value(fft_length)\n            if fft_length_static is not None:\n                fft_length = fft_length_static\n            return ifft_fn(input_tensor, fft_length, Treal=real_dtype, name=name)\n    _irfft.__doc__ = re.sub('`input`', '`input_tensor`', re.sub('    Treal.*?\\n', '', ifft_fn.__doc__))\n    return _irfft",
    "docstring": "Wrapper around gen_spectral_ops.irfft* that infers fft_length argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:_irfft_wrapper arg:ifft_fn arg:fft_rank arg:default_name arguments arg arg arg FunctionDef name:_irfft arg:input_tensor arg:fft_length arg:name arguments arg arg arg With Call Assign Call Call If Compare Raise Call Assign Assign If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Return return:yes Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_fastpath_enabled",
    "source_code": "def set_fastpath_enabled(value: bool) -> None:\n    global _is_fastpath_enabled\n    _is_fastpath_enabled = value",
    "docstring": "Sets whether fast path is enabled",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\mha\\__init__.py",
    "ast_data": "FunctionDef name:set_fastpath_enabled arg:value arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "PaddingSpec",
    "source_code": "@tf_export(v1=['tpu.PaddingSpec'])\nclass PaddingSpec(enum.IntEnum):\n    AUTO = 0\n    POWER_OF_TWO = 1",
    "docstring": "Represents the type of padding policies for tpu.replicate.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "ClassDef name:PaddingSpec Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_nvtx_range_push",
    "source_code": "def _nvtx_range_push(name: str):\n    if torch.cuda.is_available():\n        torch.cuda.nvtx.range_push(name)",
    "docstring": "If PyTorch is installed with CUDA support, this starts NVTX range. Check torch.cuda.nvtx.range_push's document for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_nvtx_range_push arg:name arguments arg If Call Call"
  },
  {
    "library": "uvicorn",
    "name": "_install_sigquit_handler",
    "source_code": "def _install_sigquit_handler(self) -> None:\n    loop = asyncio.get_running_loop()\n    loop.add_signal_handler(signal.SIGQUIT, self.handle_exit, signal.SIGQUIT, None)",
    "docstring": "Install a SIGQUIT handler on workers. - -",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\workers.py",
    "ast_data": "FunctionDef name:_install_sigquit_handler arg:self arguments arg Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_readonly_memmap_input",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_readonly_memmap_input(name, estimator_orig):\n    X, y = make_blobs(random_state=0, n_samples=21)\n    X = _enforce_estimator_tags_X(estimator_orig, X)\n    estimator = clone(estimator_orig)\n    y = _enforce_estimator_tags_y(estimator, y)\n    X, y = create_memmap_backed_data([X, y])\n    set_random_state(estimator)\n    assert estimator.fit(X, y) is estimator",
    "docstring": "Check that the estimator can handle readonly memmap backed data. This is particularly needed to support joblib parallelisation.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_readonly_memmap_input arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Compare Call Call"
  },
  {
    "library": "django",
    "name": "BCryptPasswordHasher",
    "source_code": "class BCryptPasswordHasher(BCryptSHA256PasswordHasher):\n    algorithm = 'bcrypt'\n    digest = None",
    "docstring": "Secure password hashing using the bcrypt algorithm This is considered by many to be the most secure algorithm but you must first install the bcrypt library. Please be warned that this library depends on native C code and might cause portability issues. This hasher does not first hash the password which means it is subject to bcrypt's 72 bytes password truncation. Most use cases should prefer the BCryptSHA256PasswordHasher.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\hashers.py",
    "ast_data": "ClassDef name:BCryptPasswordHasher Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "conv_input_length",
    "source_code": "def conv_input_length(output_length, filter_size, padding, stride):\n    if output_length is None:\n        return None\n    assert padding in {'same', 'valid', 'full'}\n    if padding == 'same':\n        pad = filter_size // 2\n    elif padding == 'valid':\n        pad = 0\n    elif padding == 'full':\n        pad = filter_size - 1\n    return (output_length - 1) * stride - 2 * pad + filter_size",
    "docstring": "Determines input length of a convolution given output length. Args: output_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\". stride: integer. Returns: The input length (integer).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\conv_utils.py",
    "ast_data": "FunctionDef name:conv_input_length arg:output_length arg:filter_size arg:padding arg:stride arguments arg arg arg arg If Compare Return return:no Compare If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
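Plugging numbers into the padding arithmetic above helps check it. This standalone copy of the function inverts the usual forward relation `output = (input + 2*pad - filter) // stride + 1`:

```python
def conv_input_length(output_length, filter_size, padding, stride):
    if output_length is None:
        return None
    assert padding in {"same", "valid", "full"}
    if padding == "same":
        pad = filter_size // 2
    elif padding == "valid":
        pad = 0
    else:  # "full"
        pad = filter_size - 1
    return (output_length - 1) * stride - 2 * pad + filter_size

# 'valid', filter 3, stride 2: output 4 needs (4-1)*2 - 0 + 3 = 9 inputs.
print(conv_input_length(4, 3, "valid", 2))  # 9
# 'same', stride 1: input length equals output length.
print(conv_input_length(7, 3, "same", 1))   # 7
```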
  {
    "library": "tensorflow",
    "name": "_set_mutable",
    "source_code": "def _set_mutable(self, mutable):\n    object.__setattr__(self, '_mutable', mutable)",
    "docstring": "Change the mutability value to on this options and children.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "FunctionDef name:_set_mutable arg:self arg:mutable arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "run_ui",
    "source_code": "def run_ui(self, init_command=None, title=None, title_color=None, enable_mouse_on_start=True):\n    print(title)\n    if init_command is not None:\n        self._dispatch_command(init_command)\n    exit_token = self._ui_loop()\n    if self._on_ui_exit:\n        self._on_ui_exit()\n    return exit_token",
    "docstring": "Run the CLI: See the doc of base_ui.BaseUI.run_ui for more details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\readline_ui.py",
    "ast_data": "FunctionDef name:run_ui arg:self arg:init_command arg:title arg:title_color arg:enable_mouse_on_start arguments arg arg arg arg arg Call If Compare Call Assign Call If Call Return return:yes"
  },
  {
    "library": "django",
    "name": "override",
    "source_code": "class override(ContextDecorator):\n\n    def __init__(self, timezone):\n        self.timezone = timezone\n\n    def __enter__(self):\n        self.old_timezone = getattr(_active, 'value', None)\n        if self.timezone is None:\n            deactivate()\n        else:\n            activate(self.timezone)\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        if self.old_timezone is None:\n            deactivate()\n        else:\n            _active.value = self.old_timezone",
    "docstring": "Temporarily set the time zone for the current thread. This is a context manager that uses django.utils.timezone.activate() to set the timezone on entry and restores the previously active timezone on exit. The ``, Django enables the default time zone.",
    "type": "class",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "ClassDef name:override FunctionDef name:__init__ arg:self arg:timezone arguments arg arg Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call If Compare Call Call FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg If Compare Call Assign"
  },
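A brief usage sketch (this assumes a configured Django settings module; the zone name is an arbitrary example). Because `override` subclasses `ContextDecorator`, it also works as a function decorator:

```python
from zoneinfo import ZoneInfo

from django.utils import timezone

# Per-thread override; the previously active zone is restored on exit.
with timezone.override(ZoneInfo("Africa/Nairobi")):
    print(timezone.get_current_timezone())

# Passing None deactivates the override, so the default time zone
# (settings.TIME_ZONE) applies inside the block.
with timezone.override(None):
    print(timezone.get_current_timezone())
```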
  {
    "library": "tensorflow",
    "name": "backend",
    "source_code": "@doc_controls.do_not_generate_docs\ndef backend():\n    return 'tensorflow'",
    "docstring": "Publicly accessible method for determining the current backend. Only exists for API compatibility with multi-backend Keras. Returns: The string \"tensorflow\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:backend arguments Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "track",
    "source_code": "def track(self, font, s):\n    char_to_font = font._get_fontmap(s)\n    for _c, _f in char_to_font.items():\n        self.used.setdefault(_f.fname, set()).add(ord(_c))",
    "docstring": "Record that string *s* is being typeset using font *font*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_pdf_ps.py",
    "ast_data": "FunctionDef name:track arg:self arg:font arg:s arguments arg arg arg Assign Call For Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "device",
    "source_code": "class device:\n\n    def __init__(self, device: Any):\n        self.idx = _get_device_index(device, optional=True)\n        self.prev_idx = -1\n\n    def __enter__(self):\n        self.prev_idx = torch.cuda._exchange_device(self.idx)\n\n    def __exit__(self, type: Any, value: Any, traceback: Any):\n        self.idx = torch.cuda._maybe_exchange_device(self.prev_idx)\n        return False",
    "docstring": "Context-manager that changes the selected device. Args: device (torch.device or int): device index to select. It's a no-op if this argument is a negative integer or ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "ClassDef name:device FunctionDef name:__init__ arg:self arg:device arguments arg arg Assign Call Assign FunctionDef name:__enter__ arg:self arguments arg Assign Call FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg Assign Call Return return:yes"
  },
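A short usage sketch of the context manager above (guarded so the device switch only runs on a multi-GPU machine):

```python
import torch

if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    before = torch.cuda.current_device()
    # Allocations inside the block target cuda:1; __exit__ swaps the
    # previously selected device back.
    with torch.cuda.device(1):
        x = torch.ones(8, device="cuda")
    assert x.device.index == 1
    assert torch.cuda.current_device() == before

# A negative index makes the context manager a no-op, per the docstring.
with torch.cuda.device(-1):
    pass
```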
  {
    "library": "tensorflow",
    "name": "set",
    "source_code": "def set(self, property_name, property_val):\n    if property_name not in self._config:\n        raise KeyError('%s is not a valid property name.' % property_name)\n    orig_val = self._config[property_name]\n    if isinstance(orig_val, bool):\n        if isinstance(property_val, str):\n            if property_val.lower() in ('1', 'true', 't', 'yes', 'y', 'on'):\n                property_val = True\n            elif property_val.lower() in ('0', 'false', 'f', 'no', 'n', 'off'):\n                property_val = False\n            else:\n                raise ValueError('Invalid string value for bool type: %s' % property_val)\n        else:\n            property_val = bool(property_val)\n    elif isinstance(orig_val, int):\n        property_val = int(property_val)\n    elif isinstance(orig_val, str):\n        property_val = str(property_val)\n    else:\n        raise TypeError('Unsupported property type: %s' % type(orig_val))\n    self._config[property_name] = property_val\n    self._save_to_file()\n    if property_name in self._set_callbacks:\n        self._set_callbacks[property_name](self._config)",
    "docstring": "Set the value of a property. Supports limitd property value types: , and . Args: property_name: Name of the property. property_val: Value of the property. If the property has type and this argument has type, the value will be parsed as a Raises: ValueError: if a property_value fails to be parsed as a . KeyError: if is an invalid property name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_config.py",
    "ast_data": "FunctionDef name:set arg:self arg:property_name arg:property_val arguments arg arg arg If Compare Raise Call Assign If Call If Call If Compare Call Assign If Compare Call Assign Raise Call Assign Call If Call Assign Call If Call Assign Call Raise Call Call Assign Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "DistributedIteratorSpec",
    "source_code": "class DistributedIteratorSpec(DistributedDatasetAndIteratorSpec):\n\n    @property\n    def value_type(self):\n        return DistributedIterator\n\n    @property\n    def _component_specs(self):\n        specs = []\n        worker_device_pairs = self._input_workers._worker_device_pairs\n        for i, (input_device, compute_devices) in enumerate(worker_device_pairs):\n            element_spec = nest.map_structure(functools.partial(_replace_per_replica_spec, i=i), self._element_spec)\n            specs.append(_SingleWorkerDatasetIteratorSpec(input_device, compute_devices, element_spec, self._options, self._canonicalize_devices))\n        return specs\n\n    def _to_components(self, value):\n        return value._iterators\n\n    def _from_components(self, components):\n        return DistributedIterator(input_workers=self._input_workers, iterators=None, components=components, element_spec=self._element_spec, strategy=self._strategy, cardinality=self._cardinality, enable_get_next_as_optional=self._enable_get_next_as_optional, options=self._options, replica_order=self._replica_order)\n\n    @staticmethod\n    def from_value(value):\n        return DistributedIteratorSpec(value._input_workers, value._element_spec, value._strategy, value._options, cardinality=value._cardinality, enable_get_next_as_optional=value._enable_get_next_as_optional)",
    "docstring": "Type specification for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "ClassDef name:DistributedIteratorSpec FunctionDef name:value_type arg:self arguments arg Return return:yes FunctionDef name:_component_specs arg:self arguments arg Assign Assign For Call Assign Call Call Call Call Return return:yes FunctionDef name:_to_components arg:self arg:value arguments arg arg Return return:yes FunctionDef name:_from_components arg:self arg:components arguments arg arg Return return:yes Call FunctionDef name:from_value arg:value arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "match",
    "source_code": "def match(self, line_seg1: Tensor, line_seg2: Tensor, desc1: Tensor, desc2: Tensor) -> Tensor:\n    return self.line_matcher(line_seg1, line_seg2, desc1, desc2)",
    "docstring": "Find the best matches between two sets of line segments and their corresponding descriptors. Args: line_seg1: list of line segments in image 1, with shape [num_lines, 2, 2]. line_seg2: list of line segments in image 2, with shape [num_lines, 2, 2]. desc1: semi-dense descriptor map of image 1, with shape [1, 128, H/4, W/4]. desc2: semi-dense descriptor map of image 2, with shape [1, 128, H/4, W/4]. Returns: A np.array of size [num_lines1] indicating the index in line_seg2 of the matched line, for each line in line_seg1. -1 means that the line is not matched.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py",
    "ast_data": "FunctionDef name:match arg:self arg:line_seg1 arg:line_seg2 arg:desc1 arg:desc2 arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_PostInitInjectionMetaClass",
    "source_code": "class _PostInitInjectionMetaClass(type):\n\n    def __call__(cls: Type[T], *args: Any, **kwargs: Any) -> T:\n        obj = type.__call__(cls, *args, **kwargs)\n        obj.__post_init__()\n        return obj",
    "docstring": "To inject the `` function after the creation of each instance.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\base.py",
    "ast_data": "ClassDef name:_PostInitInjectionMetaClass FunctionDef name:__call__ arg:cls arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_experimental_default_mesh",
    "source_code": "@contextlib.contextmanager\ndef _experimental_default_mesh(self, mesh: layout_lib.Mesh):\n    previous_default = self._current_default_mesh\n    self._register_mesh(mesh)\n    _pywrap_dtensor_device.ExperimentalSetDefaultMesh(self._device_info, mesh.to_string().encode('utf-8'))\n    self._current_default_mesh = mesh\n    yield\n    _pywrap_dtensor_device.ExperimentalClearDefaultMesh(self._device_info)\n    if previous_default:\n        _pywrap_dtensor_device.ExperimentalSetDefaultMesh(self._device_info, previous_default.to_string().encode('utf-8'))\n    self._current_default_mesh = previous_default",
    "docstring": "Sets a default mesh for all ops in the scope. Note: This is an internal helper method, which is not user facing api. Useful for requesting a specific mesh for ops which would have no inferred layout, e.g. tf.zeros. Args: mesh: A Mesh to be used for ops without Mesh. Yields: Nothing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:_experimental_default_mesh arg:self arg:mesh arguments arg arg Assign Call Call Call Call Assign Call If Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "enter_cond_section",
    "source_code": "def enter_cond_section(self, section_id):\n    assert section_id not in self.cond_entry\n    assert section_id not in self.cond_leaves\n    self.cond_leaves[section_id] = []",
    "docstring": "Enters a conditional section. Conditional sections define an entry node, and one or more branches. Args: section_id: Hashable, the same node that will be used in calls to the section_id arg passed to new_cond_branch",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:enter_cond_section arg:self arg:section_id arguments arg arg Compare Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "_TopKGrad",
    "source_code": "@ops.RegisterGradient('TopK')\n@ops.RegisterGradient('TopKV2')\ndef _TopKGrad(op: ops.Operation, grad, _):\n    in_shape = array_ops.shape(op.inputs[0])\n    ind_shape = array_ops.shape(op.outputs[1])\n    ind_lastdim = array_ops.gather(math_ops.cast(ind_shape, dtypes.int64), array_ops.size(ind_shape) - 1)\n    ind_2d = array_ops.reshape(op.outputs[1], array_ops_stack.stack([-1, ind_lastdim]))\n    in_lastdim = array_ops.gather(math_ops.cast(in_shape, dtypes.int64), array_ops.size(in_shape) - 1)\n    outerdim = array_ops.shape(ind_2d)[0]\n    ind = array_ops.reshape(ind_2d + math_ops.cast(array_ops.expand_dims(math_ops.range(0, math_ops.cast(outerdim, dtypes.int64) * in_lastdim, in_lastdim), -1), dtypes.int32), [-1])\n    return [array_ops.reshape(array_ops.scatter_nd(array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]), [math_ops.reduce_prod(in_shape)]), in_shape), array_ops.zeros([], dtype=dtypes.int32)]",
    "docstring": "Return the gradients for TopK. Args: op: The TopKOp for which we need to generate gradients. grad: Tensor. The gradients passed to the TopKOp. Returns: A list of two tensors, the first being the gradient w.r.t to the input and TopK, and the second being the gradient w.r.t. to the indices (all zero).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_TopKGrad arg:op arg:grad arg:_ arguments arg arg arg Assign Call Assign Call Assign Call Call Call Assign Call Call Assign Call Call Call Assign Call Assign Call Call Call Call Call Return return:yes Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_NextAfterGrad",
    "source_code": "@ops.RegisterGradient('NextAfter')\ndef _NextAfterGrad(op: ops.Operation, grad):\n    x1 = op.inputs[0]\n    x2 = op.inputs[1]\n    s_x1 = array_ops.shape(x1)\n    s_x2 = array_ops.shape(x2)\n    r_x1, r_x2 = gen_array_ops.broadcast_gradient_args(s_x1, s_x2)\n    with ops.control_dependencies([grad]):\n        partial_x1 = array_ops.ones(s_x1, dtype=x1.dtype)\n        partial_x2 = array_ops.zeros(s_x2, dtype=x2.dtype)\n        return (array_ops.reshape(math_ops.reduce_sum(partial_x1 * grad, r_x1), s_x1), array_ops.reshape(math_ops.reduce_sum(partial_x2 * grad, r_x2), s_x2))",
    "docstring": "Returns gradient of nextafter(x1, x2) with respect to x1 and x2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_NextAfterGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_app_template_dirs",
    "source_code": "@functools.lru_cache\ndef get_app_template_dirs(dirname):\n    return tuple((path for app_config in apps.get_app_configs() if app_config.path and (path := (Path(app_config.path) / dirname)).is_dir()))",
    "docstring": "Return an iterable of paths of directories to load app templates from. dirname is the name of the subdirectory containing templates inside installed applications.",
    "type": "function",
    "file_path": "django\\django\\template\\utils.py",
    "ast_data": "FunctionDef name:get_app_template_dirs arg:dirname arguments arg Return return:yes Call Call BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "__exit__",
    "source_code": "def __exit__(self, exc_type, exc_val, exc_tb):\n    if not self.enabled:\n        return\n    process_global_events = _disable_server_process_global_profiler()\n    process_global_function_events = []\n    for thread_local_events in process_global_events:\n        thread_local_function_events = torch.autograd.profiler_legacy._parse_legacy_records(thread_local_events)\n        thread_local_function_events.sort(key=lambda function_event: [function_event.time_range.start, -function_event.time_range.end])\n        process_global_function_events.append(thread_local_function_events)\n    flattened_function_events = list(itertools.chain.from_iterable(process_global_function_events))\n    self.function_events = torch.autograd.profiler_util.EventList(flattened_function_events, use_device='cuda' if self.use_cuda else None, profile_memory=self.profile_memory)\n    self.function_events._build_tree()\n    self.process_global_function_events = process_global_function_events\n    return False",
    "docstring": "Turn off server-side process-global profiling. Aggregate all profiling events recorded by RPC threads. These attributes are assigned on exiting context. Attributes: function_events (torch.autograd.profiler.EventList). It's a list that has helper methods, like 1) show record items in a pretty-print table. 2) do averaging by grouping on keys. 3) and more. process_global_function_events (List[torch.autograd.profiler.FunctionEvent]). It's a list of `` elements. Every element is a profiling result of an RPC request handling within the profiling range.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\rpc\\server_process_global_profiler.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg If Return return:no Assign Call Assign For Assign Call Call arguments arg Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "before_request",
    "source_code": "def before_request(self):\n    self.count += 1",
    "docstring": "Increment the counter before HTTP request.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:before_request arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "_max_in_bounds",
    "source_code": "def _max_in_bounds(self, max):\n    if max >= self.valmax:\n        if not self.closedmax:\n            return self.val[1]\n        max = self.valmax\n    if max <= self.val[0]:\n        max = self.val[0]\n    return self._stepped_value(max)",
    "docstring": "Ensure the new max value is between valmax and self.val[0].",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_max_in_bounds arg:self arg:max arguments arg arg If Compare If Return return:yes Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_trackable_children",
    "source_code": "def _trackable_children(self, save_type=base.SaveType.CHECKPOINT, **kwargs):\n    self._check_self_external_modification()\n    if self._self_non_string_key:\n        raise ValueError(f\"Unable to save the object {self} (a dictionary wrapper constructed automatically on attribute assignment). The wrapped dictionary contains a non-string key which maps to a trackable object or mutable data structure.\\n\\nIf you don't need this dictionary checkpointed, wrap it in a non-trackable object; it will be subsequently ignored.\")\n    if self._self_external_modification:\n        raise ValueError(f\"Unable to save the object {self} (a dictionary wrapper constructed automatically on attribute assignment). The wrapped dictionary was modified outside the wrapper (its final value was {self}, its value when a checkpoint dependency was added was {self._self_last_wrapped_dict_snapshot}), which breaks restoration on object creation.\\n\\nIf you don't need this dictionary checkpointed, wrap it in a non-trackable object; it will be subsequently ignored.\")\n    assert not self._dirty\n    children = super()._trackable_children(save_type, **kwargs)\n    if save_type == base.SaveType.SAVEDMODEL:\n        children.update({key: value for key, value in self.items() if _is_function(value)})\n    return children",
    "docstring": "Check that the object is saveable before listing its dependencies.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_trackable_children arg:self arg:save_type arguments arg arg arg Call If Raise Call If Raise Call Assign Call Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "max_memory_reserved",
    "source_code": "def max_memory_reserved(device: 'Device'=None) -> int:\n    return memory_stats(device=device).get('reserved_bytes.all.peak', 0)",
    "docstring": "Return the maximum GPU memory managed by the caching allocator in bytes for a given device. By default, this returns the peak cached memory since the beginning of this program. :func: can be used to reset the starting point in tracking this metric. For example, these two functions can measure the peak cached memory amount of each iteration in a training loop. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:max_memory_reserved arg:device arguments arg Return return:yes Call Call"
  },
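A sketch of the measurement pattern the docstring describes, resetting the peak between iterations (tensor sizes are arbitrary):

```python
import torch

if torch.cuda.is_available():
    for step in range(3):
        # Reset the peak so each iteration is measured independently.
        torch.cuda.reset_peak_memory_stats()
        x = torch.randn(1024, 1024, device="cuda")
        y = x @ x
        torch.cuda.synchronize()
        peak = torch.cuda.max_memory_reserved()
        print(f"step {step}: peak reserved = {peak / 2**20:.1f} MiB")
```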
  {
    "library": "kornia",
    "name": "eye_like",
    "source_code": "def eye_like(n: int, input: Tensor, shared_memory: bool=False) -> Tensor:\n    if n <= 0:\n        raise AssertionError(type(n), n)\n    if len(input.shape) < 1:\n        raise AssertionError(input.shape)\n    identity = eye(n, device=input.device).type(input.dtype)\n    return identity[None].expand(input.shape[0], n, n) if shared_memory else identity[None].repeat(input.shape[0], 1, 1)",
    "docstring": "Return a 2-D tensor with ones on the diagonal and zeros elsewhere with the same batch size as the input. Args: n: the number of rows :math:. input: image tensor that will determine the batch size of the output matrix. The expected shape is :math:. shared_memory: when set, all samples in the batch will share the same memory. Returns: The identity matrix with the same batch size as the input :math:. Notes: When the dimension to expand is of size 1, using torch.expand(...) yields the same tensor as torch.repeat(...) without using extra memory. Thus, when the tensor obtained by this method will be later assigned - use this method with shared_memory=False, otherwise, prefer using it with shared_memory=True.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\misc.py",
    "ast_data": "FunctionDef name:eye_like arg:n arg:input arg:shared_memory arguments arg arg arg If Compare Raise Call Call If Compare Call Raise Call Assign Call Call Return return:yes Call Call"
  },
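The expand-versus-repeat trade-off in the note above can be seen directly from the strides. This standalone copy of the function shows the aliased and owned variants:

```python
import torch

def eye_like(n, input, shared_memory=False):
    identity = torch.eye(n, device=input.device).type(input.dtype)
    if shared_memory:
        # expand() creates a view: one n x n matrix aliased across the batch.
        return identity[None].expand(input.shape[0], n, n)
    # repeat() materializes a private copy per batch element.
    return identity[None].repeat(input.shape[0], 1, 1)

imgs = torch.zeros(4, 3, 8, 8)
shared = eye_like(3, imgs, shared_memory=True)
owned = eye_like(3, imgs, shared_memory=False)

print(shared.stride())  # (0, 3, 1): batch stride 0 => shared storage
print(owned.stride())   # (9, 3, 1): each batch element owns its matrix
```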
  {
    "library": "pytorch",
    "name": "_apply_docstring_templates",
    "source_code": "def _apply_docstring_templates(func: Callable[_P, _T]) -> Callable[_P, _T]:\n    doc_string = getattr(_docs, f'{func.__name__}_docstring', None)\n    if doc_string is None:\n        warnings.warn(f'No documentation string available for {func.__name__}. PyTorch team should run `python tools/update_masked_docs.py` to generate the missing docstrings.')\n    else:\n        func.__doc__ = doc_string\n    __all__.append(func.__name__)\n    return func",
    "docstring": "Decorator that applies docstring templates to function docstring and returns the function instance.",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:_apply_docstring_templates arg:func arguments arg Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reorder_post_acc_grad_hook_nodes",
    "source_code": "def reorder_post_acc_grad_hook_nodes(self):\n    post_acc_grad_hooks = []\n    for node in self.fx_tracer.graph.find_nodes(op='call_function', target=call_hook):\n        if node.kwargs.get('hook_type', None) != 'post_acc_grad_hook':\n            continue\n        post_acc_grad_hooks.append(node)\n    for node in reversed(post_acc_grad_hooks):\n        getitem_node = node.args[0]\n        param_node = node.args[1]\n        acc_grad_node = None\n        for n in list(param_node.users.keys()):\n            if n.op == 'call_function' and n.target == torch.ops.inductor.accumulate_grad_.default:\n                acc_grad_node = n\n                break\n        assert acc_grad_node is not None, 'post_acc_grad_hook must have corresponding acc grad node'\n        acc_grad_node.append(getitem_node)\n        getitem_node.append(node)",
    "docstring": "Usage of AOTAutograd causes all the post_acc_grad_hook nodes to get pushed to the end of the graph. This differs from eager mode, which schedules them as soon as possible. This pass attempts to reorder the graph to mimic eager behavior.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py",
    "ast_data": "FunctionDef name:reorder_post_acc_grad_hook_nodes arg:self arguments arg Assign For Call If Compare Call Call For Call Assign Assign Assign For Call Call If BoolOp Compare Compare Assign Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_tensors",
    "source_code": "def get_tensors(graph):\n    if not isinstance(graph, ops.Graph):\n        raise TypeError('Expected a graph, got: {}'.format(type(graph)))\n    ts = []\n    for op in graph.get_operations():\n        ts += op.outputs\n    return ts",
    "docstring": "get all the tensors which are input or output of an op in the graph. Args: graph: a . Returns: A list of . Raises: TypeError: if graph is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py",
    "ast_data": "FunctionDef name:get_tensors arg:graph arguments arg If Call Raise Call Call Call Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "as_dense_types",
    "source_code": "def as_dense_types(types, classes):\n    ret = nest.pack_sequence_as(types, [dtypes.variant if c is sparse_tensor.SparseTensor else ty for ty, c in zip(nest.flatten(types), nest.flatten(classes))])\n    return ret",
    "docstring": "Converts sparse tensor types to . Args: types: a structure of types to convert. classes: a structure of objects that identify the dataset item classes Returns: a structure matching the nested structure of , containing at positions where contains and matching contents of otherwise",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py",
    "ast_data": "FunctionDef name:as_dense_types arg:types arg:classes arguments arg arg Assign Call Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_managed_param_to_fqn",
    "source_code": "def _get_managed_param_to_fqn(module_to_wrap: nn.Module, ignored_params: set[nn.Parameter], visited_modules: set[nn.Module], root_prefix: str) -> dict[nn.Parameter, str]:\n    param_to_fqn: dict[nn.Parameter, str] = {}\n    queue = collections.deque([(module_to_wrap, root_prefix)])\n    visited_modules.add(module_to_wrap)\n    while queue:\n        module, prefix = queue.popleft()\n        for param_name, param in module.named_parameters(recurse=False):\n            if param not in ignored_params:\n                fqn = param_name if prefix == '' else prefix + '.' + param_name\n                param_to_fqn[param] = fqn\n        for child_module_name, child_module in module.named_children():\n            if child_module is None:\n                continue\n            if child_module not in visited_modules:\n                visited_modules.add(child_module)\n                child_prefix = child_module_name if prefix == '' else prefix + '.' + child_module_name\n                queue.append((child_module, child_prefix))\n    return param_to_fqn",
    "docstring": "This returns a dict that maps managed parameter to its FQN for the given `` function meant to be called post-wrapping and on the full module tree in one shot. Given those differences, we do not try to unify the two.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_wrap_utils.py",
    "ast_data": "FunctionDef name:_get_managed_param_to_fqn arg:module_to_wrap arg:ignored_params arg:visited_modules arg:root_prefix arguments arg arg arg arg Assign Call Call While Assign Call For Call If Compare Assign Compare Assign For Call If Compare If Compare Call Assign Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "allow_specs",
    "source_code": "@property\ndef allow_specs(self) -> bool:\n    return self._allow_specs",
    "docstring": "Allow TypeSpecs to be casted (instead of the actual CompositeTensors).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\trace_type_builder.py",
    "ast_data": "FunctionDef name:allow_specs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "calculate_tflops",
    "source_code": "def calculate_tflops(config: ExperimentConfig, time_us: float, is_backward: bool=False, sparsity: float=0.0) -> float:\n    B = config.batch_size\n    H = config.num_heads\n    M = config.q_seq_len\n    N = config.kv_seq_len\n    D = config.head_dim\n    density = 1.0 - sparsity\n    qk_flops = M * N * D * 2\n    softmax_flops = M * N * 2\n    av_flops = M * N * D * 2\n    total_flops = B * H * (qk_flops + softmax_flops + av_flops)\n    total_flops *= density\n    if is_backward:\n        total_flops *= 2.5\n    tflops = total_flops / (time_us * 1e-06) / 1000000000000.0\n    return tflops",
    "docstring": "Calculate TFLOPS for scaled dot product attention. Parameters: - config: The experiment configuration - time_us: The execution time in microseconds - is_backward: Whether to calculate for backward pass (includes gradient computation) - sparsity: Sparsity factor between 0.0 and 1.0, where 0.0 means no sparsity and 1.0 means fully sparse Returns: - TFLOPS value",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\transformer\\sdpa.py",
    "ast_data": "FunctionDef name:calculate_tflops arg:config arg:time_us arg:is_backward arg:sparsity arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign If Assign Return return:yes"
  },
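Working the FLOP count above through with concrete (made-up) sizes makes the units explicit:

```python
# Hypothetical sizes: B=4 batch, H=16 heads, M=N=2048 tokens, D=64 head dim,
# dense attention (sparsity = 0.0), forward pass only.
B, H, M, N, D = 4, 16, 2048, 2048, 64

qk_flops = M * N * D * 2   # Q @ K^T: 2 flops per multiply-add
softmax_flops = M * N * 2  # exponentiate + normalize each score
av_flops = M * N * D * 2   # attn @ V
total_flops = B * H * (qk_flops + softmax_flops + av_flops)

time_us = 1500.0  # assumed measured runtime in microseconds
tflops = total_flops / (time_us * 1e-6) / 1e12
print(f"{total_flops:.3e} FLOPs -> {tflops:.1f} TFLOP/s")  # ~46.2
```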
  {
    "library": "scipy",
    "name": "lagrangian_hessian_x",
    "source_code": "def lagrangian_hessian_x(self, z, v):\n    x = self.get_variables(z)\n    v_eq = v[:self.n_eq]\n    v_ineq = v[self.n_eq:self.n_eq + self.n_ineq]\n    lagr_hess = self.lagr_hess\n    return lagr_hess(x, v_eq, v_ineq)",
    "docstring": "Returns Lagrangian Hessian (in relation to ) -> Hx",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py",
    "ast_data": "FunctionDef name:lagrangian_hessian_x arg:self arg:z arg:v arguments arg arg arg Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    if task_type is not None and task_id is not None:\n        master = self.cluster_spec().task_address(task_type, task_id)\n        return format_master_url(master, rpc_layer or self._rpc_layer)\n    return self._cluster_resolvers[0].master(rpc_layer=rpc_layer)",
    "docstring": "Returns the master address to use when creating a session. This usually returns the master from the first ClusterResolver passed in, but you can override this by specifying the task_type and task_id. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg If BoolOp Compare Compare Assign Call Call Return return:yes Call BoolOp Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "construct_fast",
    "source_code": "@classmethod\ndef construct_fast(cls, c, x, extrapolate=None, axis=0):\n    self = object.__new__(cls)\n    self.c = c\n    self.x = x\n    self.axis = axis\n    if extrapolate is None:\n        extrapolate = True\n    self.extrapolate = extrapolate\n    return self",
    "docstring": "Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `` array must have dtype float.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:construct_fast arg:cls arg:c arg:x arg:extrapolate arg:axis arguments arg arg arg arg arg Assign Call Assign Assign Assign If Compare Assign Assign Return return:yes"
  },
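For illustration, a hedged example of building a piecewise polynomial through `PPoly.construct_fast`; the tent-shaped coefficients are assumptions chosen for the demo, laid out highest power first as `PPoly` expects:

import numpy as np
from scipy.interpolate import PPoly

# Two linear pieces on [0, 1) and [1, 2]: x and 2 - x (a tent function).
c = np.array([[1.0, -1.0],   # slope of each piece (highest power first)
              [0.0, 1.0]])   # constant term at the left breakpoint
x = np.array([0.0, 1.0, 2.0])
p = PPoly.construct_fast(c, x)   # skips the validation done in __init__
print(p([0.5, 1.5]))             # -> [0.5, 0.5]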
  {
    "library": "kornia",
    "name": "projection_from_Rt",
    "source_code": "def projection_from_Rt(rmat: Tensor, tvec: Tensor) -> Tensor:\n    if not (len(rmat.shape) >= 2 and rmat.shape[-2:] == (3, 3)):\n        raise AssertionError(rmat.shape)\n    if not (len(tvec.shape) >= 2 and tvec.shape[-2:] == (3, 1)):\n        raise AssertionError(tvec.shape)\n    return concatenate([rmat, tvec], -1)",
    "docstring": "Compute the projection matrix from Rotation and translation. .. warning:: This API signature it is experimental and might suffer some changes in the future. Concatenates the batch of rotations and translations such that :math:. Args: rmat: the rotation matrix with shape :math:. tvec: the translation vector with shape :math:. Returns: the projection matrix with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:projection_from_Rt arg:rmat arg:tvec arguments arg arg If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "xla_is_available",
    "source_code": "def xla_is_available() -> bool:\n    if importlib.util.find_spec('torch_xla') is not None:\n        return True\n    return False",
    "docstring": "Return whether is available in the system.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:xla_is_available arguments If Compare Call Return return:yes Return return:yes"
  },
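The same `find_spec` probe works for any optional dependency; a minimal sketch (module names are arbitrary examples):

import importlib.util

def module_is_available(name: str) -> bool:
    # find_spec returns None when the module cannot be found.
    return importlib.util.find_spec(name) is not None

print(module_is_available("torch_xla"))  # False unless torch_xla is installed
print(module_is_available("json"))       # True: part of the standard library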
  {
    "library": "tensorflow",
    "name": "VariablePolicy",
    "source_code": "class VariablePolicy(object):\n\n    def __init__(self, aggregation):\n        self._aggregation = aggregation\n\n    def value(self):\n        raise NotImplementedError(f'VariablePolicy.value should be overridden by sub-classes. Type name is {type(self)}')\n\n    def _is_mirrored(self):\n        raise NotImplementedError(f'VariablePolicy._is_mirrored should be overridden by sub-classes. Type name is {type(self)}')\n\n    def _as_graph_element(self, _):\n        raise NotImplementedError(f'VariablePolicy._as_graph_element should be overridden by sub-classes. Type name is {type(self)}')\n\n    def _get_cross_replica(self, var):\n        raise NotImplementedError(f'VariablePolicy._get_cross_replica should be overridden by sub-classes. Type name is {type(self)}')\n\n    def _update_replica(self, var, update_fn, value, **kwargs):\n        raise NotImplementedError(f'VariablePolicy._update_replica should be overridden by sub-classes. Type name is {type(self)}')",
    "docstring": "Policy defining synchronization and aggregation of a distributed variable. Given and parameters set on a during variable creation within scope, creates an appropriate policy object and assigns it to the distributed variable. All variable operations are delegated to the respective policy object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "ClassDef name:VariablePolicy FunctionDef name:__init__ arg:self arg:aggregation arguments arg arg Assign FunctionDef name:value arg:self arguments arg Raise Call Call FunctionDef name:_is_mirrored arg:self arguments arg Raise Call Call FunctionDef name:_as_graph_element arg:self arg:_ arguments arg arg Raise Call Call FunctionDef name:_get_cross_replica arg:self arg:var arguments arg arg Raise Call Call FunctionDef name:_update_replica arg:self arg:var arg:update_fn arg:value arguments arg arg arg arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "check_cuda_lib",
    "source_code": "def check_cuda_lib(path, check_soname=True):\n    if not os.path.isfile(path):\n        raise ConfigError('No library found under: ' + path)\n    objdump = shutil.which('objdump')\n    if check_soname and objdump is not None and (not _is_windows()):\n        output = subprocess.check_output([objdump, '-p', path]).decode('utf-8')\n        output = [line for line in output.splitlines() if 'SONAME' in line]\n        sonames = [line.strip().split(' ')[-1] for line in output]\n        if not any((soname == os.path.basename(path) for soname in sonames)):\n            raise ConfigError('None of the libraries match their SONAME: ' + path)",
    "docstring": "Tests if a library exists on disk and whether its soname matches the filename. Args: path: the path to the library. check_soname: whether to check the soname as well. Raises: ConfigError: If the library does not exist or if its soname does not match the filename.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\check_cuda_libs.py",
    "ast_data": "FunctionDef name:check_cuda_lib arg:path arg:check_soname arguments arg arg If Call Raise Call Assign Call If BoolOp Compare Call Assign Call Call Assign Call Compare Assign Call Call If Call Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "parse_rendezvous_endpoint",
    "source_code": "def parse_rendezvous_endpoint(endpoint: Optional[str], default_port: int) -> tuple[str, int]:\n    if endpoint is not None:\n        endpoint = endpoint.strip()\n    if not endpoint:\n        return ('localhost', default_port)\n    if endpoint[0] == '[' and endpoint[-1] == ']':\n        host, *rest = (endpoint, *[])\n    else:\n        host, *rest = endpoint.rsplit(':', 1)\n    if len(host) > 1 and host[0] == '[' and (host[-1] == ']'):\n        host = host[1:-1]\n    if len(rest) == 1:\n        port = _try_parse_port(rest[0])\n        if port is None or port >= 2 ** 16:\n            raise ValueError(f\"The port number of the rendezvous endpoint '{endpoint}' must be an integer between 0 and 65536.\")\n    else:\n        port = default_port\n    if not re.match('^[\\\\w\\\\.:-]+$', host):\n        raise ValueError(f\"The hostname of the rendezvous endpoint '{endpoint}' must be a dot-separated list of labels, an IPv4 address, or an IPv6 address.\")\n    return (host, port)",
    "docstring": "Extract the hostname and the port number from a rendezvous endpoint. Args: endpoint: A string in format [:]. default_port: The port number to use if the endpoint does not include one. Returns: A tuple of hostname and port number.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py",
    "ast_data": "FunctionDef name:parse_rendezvous_endpoint arg:endpoint arg:default_port arguments arg arg If Compare Assign Call If Return return:yes If BoolOp Compare Compare Assign Assign Call If BoolOp Compare Call Compare Compare Assign If Compare Call Assign Call If BoolOp Compare Compare Raise Call Assign If Call Raise Call Return return:yes"
  },
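A few calls showing the expected parses (import path taken from the `file_path` above):

from torch.distributed.elastic.rendezvous.utils import parse_rendezvous_endpoint

print(parse_rendezvous_endpoint("node1:29500", default_port=29400))  # ('node1', 29500)
print(parse_rendezvous_endpoint("node1", default_port=29400))        # ('node1', 29400)
print(parse_rendezvous_endpoint(None, default_port=29400))           # ('localhost', 29400)
print(parse_rendezvous_endpoint("[::1]:29500", default_port=29400))  # ('::1', 29500)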
  {
    "library": "scikit-learn",
    "name": "_iter",
    "source_code": "def _iter(self):\n    get_weight = (self.transformer_weights or {}).get\n    for name, trans in self.transformer_list:\n        if trans == 'drop':\n            continue\n        if trans == 'passthrough':\n            trans = FunctionTransformer(feature_names_out='one-to-one')\n        yield (name, trans, get_weight(name))",
    "docstring": "Generate (name, trans, weight) tuples excluding None and 'drop' transformers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_iter arg:self arguments arg Assign BoolOp For If Compare If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_predict_begin",
    "source_code": "def on_predict_begin(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_predict_begin(logs)",
    "docstring": "Calls the 'on_predict_begin` methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_begin arg:self arg:logs arguments arg arg Assign Call For Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype_numpy",
    "source_code": "@doc_controls.do_not_generate_docs\ndef dtype_numpy(x):\n    return dtypes_module.as_dtype(x.dtype).as_numpy_dtype",
    "docstring": "Returns the numpy dtype of a Keras tensor or variable. Args: x: Tensor or variable. Returns: numpy.dtype, dtype of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:dtype_numpy arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    if self.kernel is None or self.kernel.requires_vector_input:\n        X = validate_data(self, X, ensure_2d=True, dtype='numeric', reset=False)\n    else:\n        X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)\n    return self.base_estimator_.predict(X)",
    "docstring": "Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X, values are from ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call If BoolOp Compare Assign Call Assign Call Return return:yes Call"
  },
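End-to-end, the public API looks like this; the toy dataset is an assumption for the demo:

from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = [[0.0], [1.0], [2.0], [3.0]]
y = [0, 0, 1, 1]
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0)).fit(X, y)
print(gpc.predict([[0.5], [2.5]]))  # validates X, then delegates to base_estimator_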
  {
    "library": "pytorch",
    "name": "can_use_cudnn_attention",
    "source_code": "def can_use_cudnn_attention(params: SDPAParams, debug: bool=False) -> bool:\n    return torch._C._can_use_cudnn_attention(params, debug)",
    "docstring": "Check if cudnn_attention can be utilized in scaled_dot_product_attention. Args: params: An instance of SDPAParams containing the tensors for query, key, value, an optional attention mask, dropout rate, and a flag indicating if the attention is causal. debug: Whether to logging.warn with information as to why cuDNN attention could not be run. Defaults to False. Returns: True if cuDNN can be used with the given parameters; otherwise, False. Note: This function is dependent on a CUDA-enabled build of PyTorch. It will return False in non-CUDA environments.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:can_use_cudnn_attention arg:params arg:debug arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "current_stream",
    "source_code": "def current_stream(device: Optional[_device_t]=None) -> Stream:\n    _lazy_init()\n    streamdata = torch._C._cuda_getCurrentStream(_get_device_index(device, optional=True))\n    return Stream(stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2])",
    "docstring": "Return the currently selected :class: for a given device. Args: device (torch.device or int, optional): selected device. Returns the currently selected :class: for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:current_stream arg:device arguments arg Call Assign Call Call Return return:yes Call"
  },
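A guarded usage sketch (only meaningful on a CUDA-enabled build):

import torch

if torch.cuda.is_available():
    s = torch.cuda.current_stream()            # stream on the current device
    s0 = torch.cuda.current_stream(device=0)   # stream on an explicit device index
    print(s, s.query())                        # query() is True when all work is done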
  {
    "library": "pytorch",
    "name": "register_load_state_dict_post_hook",
    "source_code": "def register_load_state_dict_post_hook(self, hook):\n    handle = RemovableHandle(self._load_state_dict_post_hooks)\n    self._load_state_dict_post_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a post-hook to be run after module's :meth: is called. It should have the following signature:: hook(module, incompatible_keys) -> None The `load_state_dicttorch.utils.hooks.RemovableHandle`",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_load_state_dict_post_hook arg:self arg:hook arguments arg arg Assign Call Assign Return return:yes"
  },
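A minimal sketch of registering such a post-hook and removing it via the returned handle; the hook name is arbitrary:

import torch
import torch.nn as nn

def report_incompatible(module, incompatible_keys):
    # incompatible_keys carries .missing_keys and .unexpected_keys lists.
    if incompatible_keys.missing_keys or incompatible_keys.unexpected_keys:
        print("load_state_dict mismatches:", incompatible_keys)

m = nn.Linear(4, 2)
handle = m.register_load_state_dict_post_hook(report_incompatible)
m.load_state_dict(m.state_dict())  # clean load: hook runs, prints nothing
handle.remove()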
  {
    "library": "pytorch",
    "name": "_handle_row_wise_sharding",
    "source_code": "def _handle_row_wise_sharding(input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg):\n    gather_inp = _all_gather_base_input(input, pg)\n    lookup_input, padding_idx, padding_row = _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank)\n    if max_norm is not None:\n        torch.nn.functional.embedding(torch.unique(lookup_input)[:-1], local_shard, padding_idx=padding_idx, max_norm=max_norm, norm_type=norm_type)\n        max_norm = None\n    local_input_embeddings = torch.nn.functional.embedding(lookup_input, torch.cat([local_shard, padding_row]), padding_idx=padding_idx, max_norm=max_norm, norm_type=norm_type)\n    local_shards = local_input_embeddings.chunk(pg.size())\n    return reduce_scatter(torch.empty_like(local_shards[0]), list(local_shards), group=pg)",
    "docstring": "Entry-point function to handle the logic of row-wise sharding of weight for embedding. (Detailed explanations of the logic can be found in the comment for sharded_embedding.) Args: input: list of ID used for lookup and aggregation. world_size: number of ranks. weight: sharded weight tensor. local_shard: row-wise shared local weight used for lookup. max_norm: If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type: The p in the p-norm to compute for the max_norm option. padding_idx: If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed \"pad\". rank: # of cuda process. pg: process group. Returns: final result of lookup.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\embedding.py",
    "ast_data": "FunctionDef name:_handle_row_wise_sharding arg:input arg:world_size arg:weight arg:local_shard arg:max_norm arg:norm_type arg:padding_idx arg:rank arg:pg arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call If Compare Call Call Assign Assign Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_sanitize",
    "source_code": "def _sanitize(self, name):\n    if name and name.startswith('_'):\n        name = 'fn' + name\n    return name",
    "docstring": "See",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\function_wrappers.py",
    "ast_data": "FunctionDef name:_sanitize arg:self arg:name arguments arg arg If BoolOp Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "no_sync",
    "source_code": "@contextmanager\ndef no_sync(self) -> Generator:\n    _lazy_init(self, self)\n    if not self._is_root:\n        raise RuntimeError('`no_sync()` on inner FSDP instances is not supported. Please call `no_sync()` on root FSDP module.')\n    self._assert_state(TrainingState.IDLE)\n    old_flags = []\n    for m in self.modules():\n        if isinstance(m, FullyShardedDataParallel):\n            old_flags.append((m, m._sync_gradients))\n            m._sync_gradients = False\n    try:\n        yield\n    finally:\n        for m, old_flag in old_flags:\n            assert not m._sync_gradients, '`_sync_gradients` was incorrectly set to `True` while in the `no_sync()` context manager'\n            m._sync_gradients = old_flag",
    "docstring": "Disable gradient synchronizations across FSDP instances. Within this context, gradients will be accumulated in module variables, which will later be synchronized in the first forward-backward pass after exiting the context. This should only be used on the root FSDP instance and will recursively apply to all children FSDP instances. .. note:: This likely results in higher memory usage because FSDP will accumulate the full model gradients (instead of gradient shards) until the eventual sync. .. note:: When used with CPU offloading, the gradients will not be offloaded to CPU when inside the context manager. Instead, they will only be offloaded right after the eventual sync.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:no_sync arg:self arguments arg Call If Raise Call Call Assign For Call If Call Call Assign Try For Assign"
  },
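A gradient-accumulation sketch, assuming a process group has already been initialized (e.g. under `torchrun`) and one CUDA device per rank:

import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

# Assumes torch.distributed.init_process_group(...) has already run.
model = FSDP(torch.nn.Linear(16, 16).cuda())
opt = torch.optim.SGD(model.parameters(), lr=0.1)

batches = [torch.randn(8, 16, device="cuda") for _ in range(4)]
with model.no_sync():                 # accumulate locally, no gradient sync
    for x in batches[:-1]:
        model(x).sum().backward()
model(batches[-1]).sum().backward()   # first backward outside the context syncs
opt.step()
opt.zero_grad()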
  {
    "library": "numpy",
    "name": "amax",
    "source_code": "@array_function_dispatch(_max_dispatcher)\ndef amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, where=np._NoValue):\n    return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims, initial=initial, where=where)",
    "docstring": "Return the maximum of an array or maximum along an axis. is an alias of . See Also -------- max : alias of this function ndarray.max : equivalent method",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:amax arg:a arg:axis arg:out arg:keepdims arg:initial arg:where arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
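Usage with the `axis`, `initial`, and `where` arguments:

import numpy as np

a = np.array([[1, 5], [3, 2]])
print(np.amax(a))                          # 5
print(np.amax(a, axis=0))                  # [3 5]
print(np.amax(a, initial=10))              # 10: initial participates in the max
print(np.amax(a, where=a < 4, initial=0))  # 3: only elements with a < 4 considered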
  {
    "library": "tensorflow",
    "name": "_nested_row_partitions",
    "source_code": "@property\ndef _nested_row_partitions(self):\n    return [RowPartition.from_row_splits(rs) for rs in self.nested_row_splits]",
    "docstring": "The row_partitions representing this shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py",
    "ast_data": "FunctionDef name:_nested_row_partitions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_next_is_sticky",
    "source_code": "def _next_is_sticky(self):\n    self._sticky_filter = True\n    return self",
    "docstring": "Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns \"self\"). The method is only used internally and should be immediately followed by a filter() that does create a clone.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_next_is_sticky arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "scale_categorical",
    "source_code": "def scale_categorical(self, axis, order=None, formatter=None):\n    _check_argument('axis', ['x', 'y'], axis)\n    if axis not in self.variables:\n        self.variables[axis] = None\n        self.var_types[axis] = 'categorical'\n        self.plot_data[axis] = ''\n    if self.var_types[axis] == 'numeric':\n        self.plot_data = self.plot_data.sort_values(axis, kind='mergesort')\n    cat_data = self.plot_data[axis].dropna()\n    self._var_ordered[axis] = order is not None or cat_data.dtype.name == 'category'\n    order = pd.Index(categorical_order(cat_data, order), name=axis)\n    if formatter is not None:\n        cat_data = cat_data.map(formatter)\n        order = order.map(formatter)\n    else:\n        cat_data = cat_data.astype(str)\n        order = order.astype(str)\n    self.var_levels[axis] = order\n    self.var_types[axis] = 'categorical'\n    self.plot_data[axis] = cat_data\n    return self",
    "docstring": "Enforce categorical (fixed-scale) rules for the data on given axis. Parameters ---------- axis : \"x\" or \"y\" Axis of the plot to operate on. order : list Order that unique values should appear in. formatter : callable Function mapping values to a string representation. Returns ------- self",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:scale_categorical arg:self arg:axis arg:order arg:formatter arguments arg arg arg arg Call If Compare Assign Assign Assign If Compare Assign Call Assign Call Assign BoolOp Compare Compare Assign Call Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_radio_fields_value",
    "source_code": "def _check_radio_fields_value(self, obj, val, label):\n    from django.contrib.admin.options import HORIZONTAL, VERTICAL\n    if val not in (HORIZONTAL, VERTICAL):\n        return [checks.Error(\"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL.\" % label, obj=obj.__class__, id='admin.E024')]\n    else:\n        return []",
    "docstring": "Check type of a value of dictionary.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_radio_fields_value arg:self arg:obj arg:val arg:label arguments arg arg arg arg If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "kornia",
    "name": "_validate_shape",
    "source_code": "def _validate_shape(shape: Union[Tuple[int, ...], torch.Size], required_shapes: Tuple[str, ...]=('BCHW',)) -> None:\n    passed = False\n    for required_shape in required_shapes:\n        if len(shape) == len(required_shape):\n            passed = True\n            break\n    if not passed:\n        raise TypeError(f'Expected input shape in {required_shape}. Got {shape}.')",
    "docstring": "Check if the dtype of the input tensor is in the range of accepted_dtypes. Args: shape: tensor shape required_shapes: List. e.g. [\"BCHW\", \"BCDHW\"]",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_validate_shape arg:shape arg:required_shapes arguments arg arg Assign For If Compare Call Call Assign If Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "add_positions",
    "source_code": "def add_positions(self, position):\n    if position is None or (hasattr(position, 'len') and len(position) == 0):\n        return\n    positions = self.get_positions()\n    positions = np.hstack([positions, np.asanyarray(position)])\n    self.set_positions(positions)",
    "docstring": "Add one or more events at the specified positions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:add_positions arg:self arg:position arguments arg arg If BoolOp Compare BoolOp Call Compare Call Return return:no Assign Call Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "translate_y",
    "source_code": "def translate_y(probability: float, magnitude: int) -> OperationBase:\n    magnitudes = linspace(-0.5, 0.5, 11)\n    return TranslateY(None, probability, magnitude_range=(magnitudes[magnitude].item(), magnitudes[magnitude + 1].item()), symmetric_megnitude=False)",
    "docstring": "Return TranslateY op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py",
    "ast_data": "FunctionDef name:translate_y arg:probability arg:magnitude arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "obtain_capture_by_value_ops",
    "source_code": "def obtain_capture_by_value_ops(dataset):\n\n    def capture_by_value(op):\n        return op.outputs[0].dtype in TENSOR_TYPES_ALLOWLIST or op.type in OP_TYPES_ALLOWLIST\n    return _traverse(dataset, capture_by_value)",
    "docstring": "Given an input dataset, finds all allowlisted ops used for construction. Allowlisted ops are stateful ops which are known to be safe to capture by value. Args: dataset: Dataset to find allowlisted stateful ops for. Returns: A list of variant_tensor producing dataset ops used to construct this dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\traverse.py",
    "ast_data": "FunctionDef name:obtain_capture_by_value_ops arg:dataset arguments arg FunctionDef name:capture_by_value arg:op arguments arg Return return:yes BoolOp Compare Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess, coord, stop_grace_period_secs=120):\n    _WrappedSession.__init__(self, sess)\n    self._coord = coord\n    self._stop_grace_period_secs = stop_grace_period_secs",
    "docstring": "Create a new . Args: sess: A object. The wrapped session. coord: A object. stop_grace_period_secs: Number of seconds given to threads to stop after has been called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arg:coord arg:stop_grace_period_secs arguments arg arg arg arg Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "may_unify_binary_op_mask_type",
    "source_code": "def may_unify_binary_op_mask_type(a, b):\n    if a.dtype == torch.bool:\n        assert b.dtype == torch.bool\n        mask_dtype = torch.int32\n        return unify_mask_base_type(V.kernel.compute, (a, b), mask_dtype)\n    return (a, b)",
    "docstring": "Given two cse variables, when dtype is bool, unify them to the same mask dtype and return casted cse variable.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_utils.py",
    "ast_data": "FunctionDef name:may_unify_binary_op_mask_type arg:a arg:b arguments arg arg If Compare Compare Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_sparse",
    "source_code": "@doc_controls.do_not_generate_docs\ndef is_sparse(tensor):\n    spec = getattr(tensor, '_type_spec', None)\n    if spec is not None:\n        return isinstance(spec, sparse_tensor.SparseTensorSpec)\n    return isinstance(tensor, sparse_tensor.SparseTensor)",
    "docstring": "Returns whether a tensor is a sparse tensor. Args: tensor: A tensor instance. Returns: A boolean. Example: >>> a = tf.keras.backend.placeholder((2, 2), sparse=False) >>> print(tf.keras.backend.is_sparse(a)) False >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:is_sparse arg:tensor arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_assert_valid_sample",
    "source_code": "def _maybe_assert_valid_sample(self, x):\n    if not self.validate_args:\n        return x\n    return control_flow_ops.with_dependencies([check_ops.assert_positive(x, message='samples must be positive'), check_ops.assert_near(array_ops.ones([], dtype=self.dtype), math_ops.reduce_sum(x, -1), message='sample last-dimension must sum to `1`')], x)",
    "docstring": "Checks the validity of a sample.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\dirichlet.py",
    "ast_data": "FunctionDef name:_maybe_assert_valid_sample arg:self arg:x arguments arg arg If Return return:yes Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_split_repeated_field",
    "source_code": "def _split_repeated_field(proto: message.Message, new_proto: message.Message, fields: util.FieldTypes, start_index: int, end_index: Optional[int]=None) -> None:\n    util.get_field(new_proto, fields)[0].MergeFrom(util.get_field(proto, fields)[0][start_index:end_index])",
    "docstring": "Generic function for copying a repeated field from one proto to another.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "FunctionDef name:_split_repeated_field arg:proto arg:new_proto arg:fields arg:start_index arg:end_index arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "__arrow_array__",
    "source_code": "def __arrow_array__(self, type=None):\n    return self._pa_array",
    "docstring": "Convert myself to a pyarrow ChunkedArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:__arrow_array__ arg:self arg:type arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "is_pydata_sparse_namespace",
    "source_code": "def is_pydata_sparse_namespace(xp: Namespace) -> bool:\n    return xp.__name__ == 'sparse'",
    "docstring": "Returns True if is a pydata/sparse namespace. See Also -------- array_namespace is_numpy_namespace is_cupy_namespace is_torch_namespace is_ndonnx_namespace is_dask_namespace is_jax_namespace is_array_api_strict_namespace",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_pydata_sparse_namespace arg:xp arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "find_single_partition",
    "source_code": "def find_single_partition(self, total_size_of_graph, logical_device_id: int=0) -> None:\n    partition_0 = self.create_partition()\n    for node in self.graph_module.graph.nodes:\n        if node.op == 'output':\n            continue\n        partition_0.nodes.add(node)\n    partition_0.used_mem_bytes = total_size_of_graph\n    partition_0.logical_device_ids = [logical_device_id]\n    self.node_to_partition = get_node_to_partition_mapping(self.partitions)\n    return",
    "docstring": "Fit the whole fx module into one device",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:find_single_partition arg:self arg:total_size_of_graph arg:logical_device_id arguments arg arg arg Assign Call For If Compare Call Assign Assign Assign Call Return return:no"
  },
  {
    "library": "pandas",
    "name": "Substitution",
    "source_code": "class Substitution:\n\n    def __init__(self, *args, **kwargs) -> None:\n        if args and kwargs:\n            raise AssertionError('Only positional or keyword args are allowed')\n        self.params = args or kwargs\n\n    def __call__(self, func: F) -> F:\n        func.__doc__ = func.__doc__ and func.__doc__ % self.params\n        return func\n\n    def update(self, *args, **kwargs) -> None:\n        if isinstance(self.params, dict):\n            self.params.update(*args, **kwargs)",
    "docstring": "A decorator to take a function's docstring and perform string substitution on it. This decorator should be robust even if func.__doc__ is None (for example, if -OO was passed to the interpreter) Usage: construct a docstring.Substitution with a sequence or dictionary suitable for performing substitution; then decorate a suitable function with the constructed object. e.g. sub_author_name = Substitution(author='Jason') @sub_author_name def some_function(x): \"%(author)s wrote this function\" # note that some_function.__doc__ is now \"Jason wrote this function\" One can also use positional arguments. sub_first_last_names = Substitution('Edgar Allen', 'Poe') @sub_first_last_names def some_function(x): \"%s %s wrote the Raven\"",
    "type": "class",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "ClassDef name:Substitution FunctionDef name:__init__ arg:self arguments arg arg arg If BoolOp Raise Call Assign BoolOp FunctionDef name:__call__ arg:self arg:func arguments arg arg Assign BoolOp Return return:yes FunctionDef name:update arg:self arguments arg arg arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available() -> bool:\n    if not _is_compiled():\n        return False\n    if _nvml_based_avail():\n        return device_count() > 0\n    else:\n        return torch._C._cuda_getDeviceCount() > 0",
    "docstring": "Return a bool indicating if CUDA is currently available. .. note:: This function will NOT poison fork if the environment variable `multiprocessing-poison-fork-note`.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments If Call Return return:yes If Call Return return:yes Compare Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor) -> Tensor:\n    x = self.acoustic_model(x)\n    x = nn.functional.log_softmax(x, dim=1)\n    return x",
    "docstring": "Args: x (Tensor): Tensor of dimension (batch_size, num_features, input_length). Returns: Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, Xy=None):\n    X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True, multi_output=True)\n    alpha = getattr(self, 'alpha', 0.0)\n    if hasattr(self, 'n_nonzero_coefs'):\n        alpha = 0.0\n        max_iter = self.n_nonzero_coefs\n    else:\n        max_iter = self.max_iter\n    if self.jitter is not None:\n        rng = check_random_state(self.random_state)\n        noise = rng.uniform(high=self.jitter, size=len(y))\n        y = y + noise\n    self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path, Xy=Xy)\n    return self",
    "docstring": "Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like of shape (n_features,) or (n_features, n_targets), default=None Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_least_angle.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:Xy arguments arg arg arg arg Assign Call Assign Call If Call Assign Assign Assign If Compare Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
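A typical call through the public estimator; the toy regression data is an assumption for the demo:

import numpy as np
from sklearn.linear_model import Lars

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = X @ np.array([1.0, 0.0, -2.0, 0.0, 0.5]) + 0.01 * rng.randn(20)

model = Lars(n_nonzero_coefs=3).fit(X, y)  # alpha forced to 0.0, max_iter = 3
print(model.coef_)                         # at most 3 nonzero coefficients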
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None, **params):\n    if _routing_enabled():\n        routed_params = process_routing(self, 'fit_transform', **params)\n    else:\n        routed_params = Bunch()\n        for name, obj in self.transformer_list:\n            if hasattr(obj, 'fit_transform'):\n                routed_params[name] = Bunch(fit_transform={})\n                routed_params[name].fit_transform = params\n            else:\n                routed_params[name] = Bunch(fit={})\n                routed_params[name] = Bunch(transform={})\n                routed_params[name].fit = params\n    results = self._parallel_func(X, y, _fit_transform_one, routed_params)\n    if not results:\n        return np.zeros((X.shape[0], 0))\n    Xs, transformers = zip(*results)\n    self._update_transformer_list(transformers)\n    return self._hstack(Xs)",
    "docstring": "Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like of shape (n_samples, n_outputs), default=None Targets for supervised learning. **params : dict, default=None - If (default): Parameters directly passed to the methods of the sub-transformers. - If : Parameters safely routed to the methods of the sub-transformers. See :ref: for more details. .. versionchanged:: 1.5 can now be routed via metadata routing API. Returns ------- X_t : array-like or sparse matrix of shape (n_samples, sum_n_components) The of results of transformers. is the sum of (output dimension) over transformers.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Assign Call For If Call Assign Call Assign Assign Call Assign Call Assign Assign Call If Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_setters",
    "source_code": "def get_setters(self):\n    setters = []\n    for name in dir(self.o):\n        if not name.startswith('set_'):\n            continue\n        func = getattr(self.o, name)\n        if not callable(func) or self.number_of_parameters(func) < 2 or self.is_alias(func):\n            continue\n        setters.append(name[4:])\n    return setters",
    "docstring": "Get the attribute strings with setters for object. For example, for a line, return ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_setters arg:self arguments arg Assign For Call If Call Assign Call If BoolOp Call Compare Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "InvalidRequestError",
    "source_code": "class InvalidRequestError(OAuth2Error):\n    error = 'invalid_request'",
    "docstring": "The request is missing a required parameter, includes an unsupported parameter value (other than grant type), repeats a parameter, includes multiple credentials, utilizes more than one mechanism for authenticating the client, or is otherwise malformed.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py",
    "ast_data": "ClassDef name:InvalidRequestError Assign"
  },
  {
    "library": "pytorch",
    "name": "RendezvousTimeout",
    "source_code": "class RendezvousTimeout:\n    _ZERO = timedelta(0)\n    _DEFAULT_TIMEOUTS = {'join': timedelta(seconds=600), 'last_call': timedelta(seconds=30), 'close': timedelta(seconds=30), 'heartbeat': timedelta(seconds=5)}\n    _join: timedelta\n    _last_call: timedelta\n    _close: timedelta\n    _heartbeat: timedelta\n\n    def __init__(self, join: Optional[timedelta]=None, last_call: Optional[timedelta]=None, close: Optional[timedelta]=None, heartbeat: Optional[timedelta]=None) -> None:\n        self._set_timeouts(join=join, last_call=last_call, close=close, heartbeat=heartbeat)\n\n    @property\n    def join(self) -> timedelta:\n        return self._join\n\n    @property\n    def last_call(self) -> timedelta:\n        return self._last_call\n\n    @property\n    def close(self) -> timedelta:\n        return self._close\n\n    @property\n    def heartbeat(self) -> timedelta:\n        return self._heartbeat\n\n    def _set_timeouts(self, **timeouts: Optional[timedelta]):\n        for name, timeout in timeouts.items():\n            if timeout is None:\n                timeout = self._DEFAULT_TIMEOUTS[name]\n            if timeout <= self._ZERO:\n                raise ValueError(f'The {name} timeout ({timeout}) must be positive.')\n            setattr(self, '_' + name, timeout)",
    "docstring": "Hold the timeout configuration of a rendezvous. Args: join: The time within which the rendezvous is expected to complete. last_call: An additional wait amount before completing the rendezvous once the rendezvous has the minimum number of required participants. close: The time within which the rendezvous is expected to close after a call to :py:meth: or :py:meth:. heartbeat: The time within which a keep-alive heartbeat is expected to complete.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:RendezvousTimeout Assign Call Assign Call Call Call Call FunctionDef name:__init__ arg:self arg:join arg:last_call arg:close arg:heartbeat arguments arg arg arg arg arg Call FunctionDef name:join arg:self arguments arg Return return:yes FunctionDef name:last_call arg:self arguments arg Return return:yes FunctionDef name:close arg:self arguments arg Return return:yes FunctionDef name:heartbeat arg:self arguments arg Return return:yes FunctionDef name:_set_timeouts arg:self arguments arg arg For Call If Compare Assign If Compare Raise Call Call"
  },
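A construction sketch; omitted arguments fall back to the defaults above, and non-positive values raise `ValueError`:

from datetime import timedelta
from torch.distributed.elastic.rendezvous.dynamic_rendezvous import RendezvousTimeout

t = RendezvousTimeout(join=timedelta(seconds=120))
print(t.join, t.last_call, t.heartbeat)  # 0:02:00 0:00:30 0:00:05

try:
    RendezvousTimeout(close=timedelta(seconds=0))
except ValueError as e:
    print(e)  # The close timeout (0:00:00) must be positive.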
  {
    "library": "tensorflow",
    "name": "_wrap_and_check_outputs",
    "source_code": "def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None):\n    if not isinstance(outputs, dict):\n        outputs = {single_output_default_name: outputs}\n    output_dict = {}\n    for key, value in outputs.items():\n        error_name = error_label or single_output_default_name\n        key = self._check_output_key(key, error_name)\n        if not isinstance(value, tensor.Tensor):\n            raise ValueError('{} output value must be a Tensor; got {}.'.format(error_name, value))\n        output_dict[key] = value\n    return output_dict",
    "docstring": "Wraps raw tensors as dicts and checks type. Note that we create a new dict here so that we can overwrite the keys if necessary. Args: outputs: A or a dict of string to . single_output_default_name: A string key for use in the output dict if the provided is a raw tensor. error_label: descriptive string for use in error messages. If none, single_output_default_name will be used. Returns: A dict of tensors Raises: ValueError: if the outputs dict keys are not strings or tuples of strings or the values are not Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:_wrap_and_check_outputs arg:self arg:outputs arg:single_output_default_name arg:error_label arguments arg arg arg arg If Call Assign Assign For Call Assign BoolOp Assign Call If Call Raise Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_single_shard_save",
    "source_code": "def _single_shard_save(file_prefix: tensor_lib.Tensor, shard: sharding_util.Shard, task: device_lib.DeviceSpec, options: 'checkpoint_options.CheckpointOptions | None'=None) -> ops.Operation:\n    options = options or checkpoint_options.CheckpointOptions()\n    tensor_names = []\n    tensors = []\n    slice_specs = []\n    for checkpoint_key, tensor_slices in shard.items():\n        for slice_spec, tensor in tensor_slices.items():\n            if tensor is not None:\n                name = tensor._wrapped_name if hasattr(tensor, '_wrapped_name') else checkpoint_key\n                spec = tensor._wrapped_slice_spec if hasattr(tensor, '_wrapped_slice_spec') else slice_spec\n                tensor_names.append(name)\n                tensors.append(tensor)\n                slice_specs.append(spec)\n    save_device = options.experimental_io_device or (tensors and task)\n    with ops.device(save_device or 'CPU:0'):\n        return io_ops.save_v2(file_prefix, tensor_names, slice_specs, tensors)",
    "docstring": "Save the saveable objects to a checkpoint with . Args: file_prefix: A string or scalar string Tensor containing the prefix to save under. shard: Dict containing tensors. {checkpoint key: {slice_spec: tensor} } task: The device spec task of the tensors in the shard. options: Optional object. Returns: An , or None when executing eagerly.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\functional_saver.py",
    "ast_data": "FunctionDef name:_single_shard_save arg:file_prefix arg:shard arg:task arg:options arguments arg arg arg arg Assign BoolOp Call Assign Assign Assign For Call For Call If Compare Assign Call Assign Call Call Call Call Assign BoolOp BoolOp With Call BoolOp Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "leaky_relu",
    "source_code": "@register_decomposition(aten.leaky_relu)\n@_inplace_wrapper\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('a',), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT)\ndef leaky_relu(a: TensorLikeType, negative_slope: float=0.01, inplace: bool=False) -> TensorLikeType:\n    if inplace:\n        raise NotImplementedError\n    python_type = utils.dtype_to_type(a.dtype)\n    if not utils.is_weakly_lesser_type(type(negative_slope), python_type):\n        msg = f'negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!'\n        raise ValueError(msg)\n    return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope))",
    "docstring": "Reference implementation of torch.nn.functional.leaky_relu",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:leaky_relu arg:a arg:negative_slope arg:inplace arguments arg arg arg If Raise Assign Call If Call Call Assign Call Raise Call Return return:yes Call Call Call Call Call Call"
  },
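The `torch.where` formulation matches the public functional op; a quick equivalence check on random input:

import torch
import torch.nn.functional as F

x = torch.randn(5)
slope = 0.01
ref = torch.where(x > 0, x, x * slope)  # same formula as the reference above
print(torch.allclose(F.leaky_relu(x, negative_slope=slope), ref))  # True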
  {
    "library": "tensorflow",
    "name": "from_tensors",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef from_tensors(self, tensors):\n    return super().from_tensors(tensors)",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "decline_if_node_in_names",
    "source_code": "@classmethod\ndef decline_if_node_in_names(cls, disallow_set: set[str]) -> OperatorSupportBase:\n\n    def _decline_if_node_in_names(submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n        return node.name not in disallow_set\n    return create_op_support(_decline_if_node_in_names)",
    "docstring": "If a node has a name that is in the disallow set, reported it as non-supported.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "FunctionDef name:decline_if_node_in_names arg:cls arg:disallow_set arguments arg arg FunctionDef name:_decline_if_node_in_names arg:submodules arg:node arguments arg arg Return return:yes Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "CodeGenerator",
    "source_code": "class CodeGenerator(NodeStateTracker, gast.NodeVisitor):\n\n    def __init__(self, ctx):\n        super(CodeGenerator, self).__init__(ctx)\n        self._output_code = ''\n        self.source_map = {}\n\n    def emit(self, code):\n        self._output_code += code\n\n    @property\n    def code_buffer(self):\n        return self._output_code\n\n    def visit(self, node):\n        if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):\n            return\n        parent_origin = self.ctx.current_origin\n        eof_before = len(self._output_code)\n        if anno.hasanno(node, anno.Basic.ORIGIN):\n            self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)\n        try:\n            ret = super(CodeGenerator, self).visit(node)\n            eof_after = len(self._output_code)\n            if eof_before - eof_after:\n                inherited_origin = anno.getanno(node, anno.Basic.ORIGIN, default=parent_origin)\n                if inherited_origin is not None:\n                    self.source_map[eof_before, eof_after] = inherited_origin\n            return ret\n        finally:\n            self.ctx.current_origin = parent_origin",
    "docstring": "Base class for general-purpose Python-to-string code transformation. Similar to Base, but outputs arbitrary strings instead of a Python AST. This uses the same visitor mechanism that the standard NodeVisitor uses, meaning that subclasses write handlers for the different kinds of nodes. New code is generated using the emit method, which appends to a code buffer that can be afterwards obtained from code_buffer. Example: class SimpleCodeGen(CodeGenerator): def visitIf(self, node): self.emit('if ') self.visit(node.test) self.emit(' { ') self.visit(node.body) self.emit(' } else { ') self.visit(node.orelse) self.emit(' } ') node = ast.parse(...) gen = SimpleCodeGen() gen.visit(node) # gen.code_buffer contains the resulting code",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "ClassDef name:CodeGenerator FunctionDef name:__init__ arg:self arg:ctx arguments arg arg Call Call Assign Assign FunctionDef name:emit arg:self arg:code arguments arg arg FunctionDef name:code_buffer arg:self arguments arg Return return:yes FunctionDef name:visit arg:self arg:node arguments arg arg If Call Return return:no Assign Assign Call If Call Assign Call Try Assign Call Call Assign Call If Assign Call If Compare Assign Return return:yes Assign"
  },
  {
    "library": "django",
    "name": "ahas_perm",
    "source_code": "async def ahas_perm(self, perm, obj=None):\n    if self.is_active and self.is_superuser:\n        return True\n    return await _auser_has_perm(self, perm, obj)",
    "docstring": "See has_perm()",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "AsyncFunctionDef name:ahas_perm arg:self arg:perm arg:obj arguments arg arg arg If BoolOp Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extend",
    "source_code": "def extend(self, modules: Iterable[Module]) -> Self:\n    if not isinstance(modules, container_abcs.Iterable):\n        raise TypeError('ModuleList.extend should be called with an iterable, but got ' + type(modules).__name__)\n    offset = len(self)\n    for i, module in enumerate(modules):\n        self.add_module(str(offset + i), module)\n    return self",
    "docstring": "Append modules from a Python iterable to the end of the list. Args: modules (iterable): iterable of modules to append",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:extend arg:self arg:modules arguments arg arg If Call Raise Call Call Assign Call For Call Call Call Return return:yes"
  },
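A small example; names are registered as stringified indices continuing from the current length:

import torch.nn as nn

layers = nn.ModuleList([nn.Linear(4, 4)])
layers.extend(nn.ReLU() for _ in range(2))  # any iterable of Modules works
print(list(dict(layers.named_children())))  # ['0', '1', '2']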
  {
    "library": "scipy",
    "name": "_root_scalar_brentq_doc",
    "source_code": "def _root_scalar_brentq_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function. bracket: A sequence of 2 floats, optional An interval bracketing a root. `` must have different signs at the two endpoints. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. options: dict, optional Specifies any method-specific options not covered above",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_brentq_doc arguments"
  },
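These options are consumed through `scipy.optimize.root_scalar`; a bracketing example:

from scipy.optimize import root_scalar

f = lambda x: x**3 - 2.0  # f(1) < 0 < f(2), so [1, 2] brackets the root
sol = root_scalar(f, method="brentq", bracket=[1.0, 2.0], xtol=1e-12)
print(sol.root, sol.converged)  # ~1.2599 (cube root of 2), True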
  {
    "library": "kornia",
    "name": "rescale",
    "source_code": "def rescale(input: Tensor, factor: Union[float, Tuple[float, float]], interpolation: str='bilinear', align_corners: Optional[bool]=None, antialias: bool=False) -> Tensor:\n    if isinstance(factor, float):\n        factor_vert = factor_horz = factor\n    else:\n        factor_vert, factor_horz = factor\n    height, width = input.size()[-2:]\n    size = (int(height * factor_vert), int(width * factor_horz))\n    return resize(input, size, interpolation=interpolation, align_corners=align_corners, antialias=antialias)",
    "docstring": "Rescale the input Tensor with the given factor. .. image:: _static/img/rescale.png Args: input: The image tensor to be scale with shape of :math:. factor: Desired scaling factor in each direction. If scalar, the value is used for both the x- and y-direction. interpolation: algorithm used for upsampling: ``. antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling. Returns: The rescaled tensor with the shape as the specified size. Example: >>> img = torch.rand(1, 3, 4, 4) >>> out = rescale(img, (2, 3)) >>> print(out.shape) torch.Size([1, 3, 8, 12])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:rescale arg:input arg:factor arg:interpolation arg:align_corners arg:antialias arguments arg arg arg arg arg If Call Assign Assign Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "point_on_surface",
    "source_code": "@property\ndef point_on_surface(self):\n    return self._topology(capi.geos_pointonsurface(self.ptr))",
    "docstring": "Compute an interior point of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:point_on_surface arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "geos_version_tuple",
    "source_code": "def geos_version_tuple():\n    return get_version_tuple(geos_version().decode())",
    "docstring": "Return the GEOS version as a tuple (major, minor, subminor).",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\libgeos.py",
    "ast_data": "FunctionDef name:geos_version_tuple arguments Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_single_linkage_tree",
    "source_code": "def _single_linkage_tree(connectivity, n_samples, n_nodes, n_clusters, n_connected_components, return_distance):\n    from scipy.sparse.csgraph import minimum_spanning_tree\n    connectivity = connectivity.astype(np.float64, copy=False)\n    epsilon_value = np.finfo(dtype=connectivity.data.dtype).eps\n    connectivity.data[connectivity.data == 0] = epsilon_value\n    mst = minimum_spanning_tree(connectivity.tocsr())\n    mst = mst.tocoo()\n    mst.data[mst.data == epsilon_value] = 0\n    mst_array = np.vstack([mst.row, mst.col, mst.data]).T\n    mst_array = mst_array[np.argsort(mst_array.T[2], kind='mergesort'), :]\n    single_linkage_tree = _hierarchical._single_linkage_label(mst_array)\n    children_ = single_linkage_tree[:, :2].astype(int)\n    parent = np.arange(n_nodes, dtype=np.intp)\n    for i, (left, right) in enumerate(children_, n_samples):\n        if n_clusters is not None and i >= n_nodes:\n            break\n        if left < n_nodes:\n            parent[left] = i\n        if right < n_nodes:\n            parent[right] = i\n    if return_distance:\n        distances = single_linkage_tree[:, 2]\n        return (children_, n_connected_components, n_samples, parent, distances)\n    return (children_, n_connected_components, n_samples, parent)",
    "docstring": "Perform single linkage clustering on sparse data via the minimum spanning tree from scipy.sparse.csgraph, then using union-find to label. The parent array is then generated by walking through the tree.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:_single_linkage_tree arg:connectivity arg:n_samples arg:n_nodes arg:n_clusters arg:n_connected_components arg:return_distance arguments arg arg arg arg arg arg Assign Call Assign Call Assign Compare Assign Call Call Assign Call Assign Compare Assign Call Assign Call Assign Call Assign Call Assign Call For Call If BoolOp Compare Compare If Compare Assign If Compare Assign If Assign Return return:yes Return return:yes"
  },
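The public entry point for this path is `AgglomerativeClustering` with `linkage='single'`; the toy data is assumed:

import numpy as np
from sklearn.cluster import AgglomerativeClustering

X = np.array([[0.0], [0.1], [0.2], [5.0], [5.1]])
labels = AgglomerativeClustering(n_clusters=2, linkage="single").fit_predict(X)
print(labels)  # two tight groups, e.g. [0 0 0 1 1] (label order may vary)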
  {
    "library": "matplotlib",
    "name": "ColorizingArtist",
    "source_code": "class ColorizingArtist(_ScalarMappable, artist.Artist):\n\n    def __init__(self, colorizer, **kwargs):\n        _api.check_isinstance(Colorizer, colorizer=colorizer)\n        super().__init__(colorizer=colorizer, **kwargs)\n\n    @property\n    def colorizer(self):\n        return self._colorizer\n\n    @colorizer.setter\n    def colorizer(self, cl):\n        _api.check_isinstance(Colorizer, colorizer=cl)\n        self._colorizer.callbacks.disconnect(self._id_colorizer)\n        self._colorizer = cl\n        self._id_colorizer = cl.callbacks.connect('changed', self.changed)\n\n    def _set_colorizer_check_keywords(self, colorizer, **kwargs):\n        self._check_exclusionary_keywords(colorizer, **kwargs)\n        self.colorizer = colorizer",
    "docstring": "Base class for artists that make map data to color using a . The applies data normalization before returning RGBA colors from a .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "ClassDef name:ColorizingArtist FunctionDef name:__init__ arg:self arg:colorizer arguments arg arg arg Call Call Call FunctionDef name:colorizer arg:self arguments arg Return return:yes FunctionDef name:colorizer arg:self arg:cl arguments arg arg Call Call Assign Assign Call FunctionDef name:_set_colorizer_check_keywords arg:self arg:colorizer arguments arg arg arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_is_input_non_float_tensor",
    "source_code": "def _is_input_non_float_tensor(node: Node):\n    if 'val' not in node.meta or not isinstance(node.meta['val'], FakeTensor):\n        return True\n    return node.meta['val'].dtype != torch.float32",
    "docstring": "Check if the input is not a float tensor, so that we can skip quantization for the node since observers only works with float Tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\xnnpack_quantizer_utils.py",
    "ast_data": "FunctionDef name:_is_input_non_float_tensor arg:node arguments arg If BoolOp Compare Call Return return:yes Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "contain_metadata_mutation_ops",
    "source_code": "def contain_metadata_mutation_ops(module: torch.fx.GraphModule) -> bool:\n    for node in module.graph.nodes:\n        if node.op == 'call_function' and hasattr(node.target, 'tags') and (torch.Tag.inplace_view in node.target.tags):\n            return True\n    return False",
    "docstring": "Checks if the module contains any metadata mutation ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\utils.py",
    "ast_data": "FunctionDef name:contain_metadata_mutation_ops arg:module arguments arg For If BoolOp Compare Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "check_setting_languages",
    "source_code": "@register(Tags.translation)\ndef check_setting_languages(app_configs, **kwargs):\n    return [Error(E002.msg.format(tag), id=E002.id) for tag, _ in settings.LANGUAGES if not isinstance(tag, str) or not language_code_re.match(tag)]",
    "docstring": "Error if LANGUAGES setting is invalid.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\translation.py",
    "ast_data": "FunctionDef name:check_setting_languages arg:app_configs arguments arg arg Return return:yes Call Call BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_convert_timedelta",
    "source_code": "def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]:\n    if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)):\n        if isinstance(self.freq, Tick):\n            delta = self._data._check_timedeltalike_freq_compat(other)\n            return delta\n    elif isinstance(other, BaseOffset):\n        if other.base == self.freq.base:\n            return other.n\n        raise raise_on_incompatible(self, other)\n    elif is_integer(other):\n        assert isinstance(other, int)\n        return other\n    raise raise_on_incompatible(self, None)",
    "docstring": "Convert timedelta-like input to an integer multiple of self.freq Parameters ---------- other : timedelta, np.timedelta64, DateOffset, int, np.ndarray Returns ------- converted : int, np.ndarray[int64] Raises ------ IncompatibleFrequency : if the input cannot be written as a multiple of self.freq. Note IncompatibleFrequency subclasses ValueError.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\period.py",
    "ast_data": "FunctionDef name:_maybe_convert_timedelta arg:self arg:other arguments arg arg If Call If Call Assign Call Return return:yes If Call If Compare Return return:yes Raise Call If Call Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_log_normalizer",
    "source_code": "def _log_normalizer(self, x):\n    out_unst_reg = torch.max(torch.le(x, self._lims[0] - 0.5), torch.gt(x, self._lims[1] - 0.5))\n    cut_nat_params = torch.where(out_unst_reg, x, (self._lims[0] - 0.5) * torch.ones_like(x))\n    log_norm = torch.log(torch.abs(torch.special.expm1(cut_nat_params))) - torch.log(torch.abs(cut_nat_params))\n    taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0\n    return torch.where(out_unst_reg, log_norm, taylor)",
    "docstring": "computes the log normalizing constant as a function of the natural parameter",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\continuous_bernoulli.py",
    "ast_data": "FunctionDef name:_log_normalizer arg:self arg:x arguments arg arg Assign Call Call Call Assign Call Call Assign Call Call Call Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    raise NotImplementedError('ShardedOptimizer state_dict not implemented yet!')",
    "docstring": "Returned state and param_groups will contain parameter keys instead of parameter indices like torch.optim.Optimizer. This allows for advanced functionality like optimizer re-sharding to be implemented.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\api.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "init_population_array",
    "source_code": "def init_population_array(self, init):\n    popn = np.asarray(init, dtype=np.float64)\n    if np.size(popn, 0) < 5 or popn.shape[1] != self.parameter_count or len(popn.shape) != 2:\n        raise ValueError('The population supplied needs to have shape (S, len(x)), where S > 4.')\n    self.population = np.clip(self._unscale_parameters(popn), 0, 1)\n    self.num_population_members = np.size(self.population, 0)\n    self.population_shape = (self.num_population_members, self.parameter_count)\n    self.population_energies = np.full(self.num_population_members, np.inf)\n    self._nfev = 0",
    "docstring": "Initializes the population with a user specified population. Parameters ---------- init : np.ndarray Array specifying subset of the initial population. The array should have shape (S, N), where N is the number of parameters. The population is clipped to the lower and upper bounds.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:init_population_array arg:self arg:init arguments arg arg Assign Call If BoolOp Compare Call Compare Compare Call Raise Call Assign Call Call Assign Call Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "call_context",
    "source_code": "def call_context():\n    call_ctx = getattr(_call_context, 'call_context', None)\n    if call_ctx is None:\n        call_ctx = CallContext()\n        _call_context.call_context = call_ctx\n    return call_ctx",
    "docstring": "Returns currently active .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:call_context arguments Assign Call If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_var_to_tensor",
    "source_code": "def _var_to_tensor(var, dtype=None, name=None, as_ref=False):\n    del name\n    if dtype is not None and (not dtype.is_compatible_with(var.dtype)):\n        raise ValueError('Incompatible type conversion requested to type {!r} for variable of type {!r}'.format(dtype.name, var.dtype.name))\n    if as_ref:\n        raise NotImplementedError(\"ShardedVariable doesn't support being used as a reference.\")\n    if 'embedding_lookup' in ops.get_name_scope():\n        raise TypeError('Converting ShardedVariable to tensor in embedding lookup ops is disallowed.')\n    return array_ops.concat(var.variables, axis=0)",
    "docstring": "Converts a to a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_var_to_tensor arg:var arg:dtype arg:name arg:as_ref arguments arg arg arg arg If BoolOp Compare Call Raise Call Call If Raise Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "calculate_qparams",
    "source_code": "@torch.jit.export\ndef calculate_qparams(self):\n    return self._calculate_qparams(self.min_val, self.max_val)",
    "docstring": "Calculates the quantization parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:calculate_qparams arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "latest",
    "source_code": "def latest(self, *fields):\n    if self.query.is_sliced:\n        raise TypeError('Cannot change a query once a slice has been taken.')\n    return self.reverse()._earliest(*fields)",
    "docstring": "Return the latest object according to fields (if given) or by the model's Meta.get_latest_by.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:latest arg:self arguments arg arg If Raise Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "rollback",
    "source_code": "def rollback(using=None):\n    get_connection(using).rollback()",
    "docstring": "Roll back a transaction.",
    "type": "function",
    "file_path": "django\\django\\db\\transaction.py",
    "ast_data": "FunctionDef name:rollback arg:using arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "close_group",
    "source_code": "def close_group(self, s):\n    pass",
    "docstring": "Close a grouping element with label *s*. Only used by the SVG renderer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:close_group arg:self arg:s arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "LoadContext",
    "source_code": "class LoadContext(threading.local):\n\n    def __init__(self):\n        super(LoadContext, self).__init__()\n        self._entered_load_context = []\n        self._load_options = None\n\n    def set_load_options(self, load_options):\n        self._load_options = load_options\n        self._entered_load_context.append(True)\n\n    def clear_load_options(self):\n        self._load_options = None\n        self._entered_load_context.pop()\n\n    def load_options(self):\n        return self._load_options\n\n    def in_load_context(self):\n        return self._entered_load_context",
    "docstring": "A context for loading a model.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load_context.py",
    "ast_data": "ClassDef name:LoadContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:set_load_options arg:self arg:load_options arguments arg arg Assign Call FunctionDef name:clear_load_options arg:self arguments arg Assign Call FunctionDef name:load_options arg:self arguments arg Return return:yes FunctionDef name:in_load_context arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "append",
    "source_code": "def append(self, item):\n    self._items.append(item)",
    "docstring": "Append an item to the Menu. Args: item: (MenuItem) the item to be appended.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:append arg:self arg:item arguments arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "aic",
    "source_code": "def aic(self, X):\n    return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()",
    "docstring": "Akaike information criterion for the current model on the input X. You can refer to this :ref: for more details regarding the formulation of the AIC used. Parameters ---------- X : array of shape (n_samples, n_dimensions) The input samples. Returns ------- aic : float The lower the better.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:aic arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_pass",
    "source_code": "def add_pass(self, _pass: Callable):\n    self.passes.append(_pass)\n    self._validated = False",
    "docstring": "Adds a pass into the current list of passes.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py",
    "ast_data": "FunctionDef name:add_pass arg:self arg:_pass arguments arg arg Call Assign"
  },
  {
    "library": "numpy",
    "name": "read_magic",
    "source_code": "@set_module('numpy.lib.format')\ndef read_magic(fp):\n    magic_str = _read_bytes(fp, MAGIC_LEN, 'magic string')\n    if magic_str[:-2] != MAGIC_PREFIX:\n        msg = 'the magic string is not correct; expected %r, got %r'\n        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))\n    major, minor = magic_str[-2:]\n    return (major, minor)",
    "docstring": "Read the magic string to get the version of the file format. Parameters ---------- fp : filelike object Returns ------- major : int minor : int",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_format_impl.py",
    "ast_data": "FunctionDef name:read_magic arg:fp arguments arg Assign Call If Compare Assign Raise Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_getitem_tuple_same_dim",
    "source_code": "@final\ndef _getitem_tuple_same_dim(self, tup: tuple):\n    retval = self.obj\n    start_val = self.ndim - len(tup) + 1\n    for i, key in enumerate(reversed(tup)):\n        i = self.ndim - i - start_val\n        if com.is_null_slice(key):\n            continue\n        retval = getattr(retval, self.name)._getitem_axis(key, axis=i)\n        assert retval.ndim == self.ndim\n    if retval is self.obj:\n        retval = retval.copy(deep=False)\n    return retval",
    "docstring": "Index with indexers that should return an object of the same dimension as self.obj. This is only called after a failed call to _getitem_lowerdim.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_getitem_tuple_same_dim arg:self arg:tup arguments arg arg Assign Assign Call For Call Call Assign If Call Assign Call Call Compare If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PeriodicModelAverager",
    "source_code": "class PeriodicModelAverager(ModelAverager):\n\n    def __init__(self, period, warmup_steps=0, process_group: Optional[dist.ProcessGroup]=None):\n        super().__init__(process_group)\n        if warmup_steps < 0:\n            raise ValueError('Arg ``warmup_steps`` must be a non-negative number.')\n        self.warmup_steps = warmup_steps\n        if period < 1:\n            raise ValueError('Arg ``period`` must be a positive value.')\n        elif period == 1:\n            warnings.warn('When period is 1, no need to use model averaging because the communication cost of all-reducing parameters will be no less than the cost of all-reducing gradients by DistributedDataParallel in the backward pass. Therefore, only DistributedDataParallel should be used for this case.')\n        self.period = period\n\n    def average_parameters(self, params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]]):\n        if self.step >= self.warmup_steps and (self.step - self.warmup_steps) % self.period == 0:\n            utils.average_parameters_or_parameter_groups(params, _not_none(self.process_group))\n        self.step += 1",
    "docstring": "Averages parameters periodically after the warm-up stage. This can be used for running _, by running :class: (DDP) using the subgroups created by :meth:. Args: period (int): The number of steps per model averaging. Usually the period should be greater than `torch.distributed.init_process_group` period. >>> averager.average_parameters(model.parameters())",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\averagers.py",
    "ast_data": "ClassDef name:PeriodicModelAverager FunctionDef name:__init__ arg:self arg:period arg:warmup_steps arg:process_group arguments arg arg arg arg Call Call If Compare Raise Call Assign If Compare Raise Call If Compare Call Assign FunctionDef name:average_parameters arg:self arg:params arguments arg arg If BoolOp Compare Compare Call Call"
  },
  {
    "library": "virtualenv",
    "name": "add_cachedir_tag",
    "source_code": "def add_cachedir_tag(self):\n    cachedir_tag_file = self.dest / 'CACHEDIR.TAG'\n    if not cachedir_tag_file.exists():\n        cachedir_tag_text = textwrap.dedent('\\n                Signature: 8a477f597d28d172789f06886806bc55\\n                # This file is a cache directory tag created by Python virtualenv.\\n                # For information about cache directory tags, see:\\n                #   https://bford.info/cachedir/\\n            ').strip()\n        cachedir_tag_file.write_text(cachedir_tag_text, encoding='utf-8')",
    "docstring": "Generate a file indicating that this is not meant to be backed up.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:add_cachedir_tag arg:self arguments arg Assign If Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_deps",
    "source_code": "def find_deps(self) -> dict[torch.fx.Node, NodeSet]:\n    deps: dict[torch.fx.Node, NodeSet] = defaultdict(set)\n    for node in self.module.graph.nodes:\n        if node.op not in CALLABLE_NODE_OPS:\n            continue\n        for user in node.users:\n            if user.op != 'output':\n                deps[user].add(node)\n    return deps",
    "docstring": "Builds a graph of node dependencies. Leaf nodes don't have any dependencies and the \"output\" node doesn't have nodes depending on it. Resulting graph has only direct dependencies, i.e. there are no transitive dependencies.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:find_deps arg:self arguments arg Call For If Compare For If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "num_columns",
    "source_code": "@abstractmethod\ndef num_columns(self) -> int:\n    pass",
    "docstring": "Return the number of columns in the DataFrame.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:num_columns arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    return self._get_params('_transformers', deep=deep)",
    "docstring": "Get parameters for this estimator. Returns the parameters given in the constructor as well as the estimators contained within the of the . Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "inserting_after",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef inserting_after(self, n: Optional[Node]=None):\n    if n is None:\n        return self.inserting_before(self._root)\n    assert n.graph == self, 'Node to insert after is not in graph.'\n    return _InsertPoint(self, n.append)",
    "docstring": "Set the point at which create_node and companion methods will insert into the graph. When used within a 'with' statement, this will temporary set the insert point and then restore it when the with statement exits:: with g.inserting_after(n): ... # inserting after node n ... # insert point restored to what it was previously g.inserting_after(n) # set the insert point permanently Args: n (Optional[Node]): The node before which to insert. If None this will insert after the beginning of the entire graph. Returns: A resource manager that will restore the insert point on ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:inserting_after arg:self arg:n arguments arg arg If Compare Return return:yes Call Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "analyze_step_stats",
    "source_code": "def analyze_step_stats(self, show_dataflow: bool=True, show_memory: bool=True, op_time: str='schedule') -> StepStatsAnalysis:\n    self._preprocess_op_time(op_time)\n    self._allocate_pids()\n    self._assign_lanes()\n    self._analyze_tensors(show_memory)\n    self._show_compute(show_dataflow)\n    if show_memory:\n        self._show_memory_counters()\n    return StepStatsAnalysis(chrome_trace=self._chrome_trace, allocator_maximums=self._allocator_maximums)",
    "docstring": "Analyze the step stats and format it into Chrome Trace Format. Args: show_dataflow: (Optional.) If True, add flow events to the trace connecting producers and consumers of tensors. show_memory: (Optional.) If True, add object snapshot events to the trace showing the sizes and lifetimes of tensors. op_time: (Optional.) How the execution time of op is shown in timeline. Possible values are \"schedule\", \"gpu\" and \"all\". \"schedule\" will show op from the time it is scheduled to the end of the scheduling. Notice by the end of its scheduling its async kernels may not start yet. It is shown using the default value from step_stats. \"gpu\" will show op with the execution time of its kernels on GPU. \"all\" will show op from the start of its scheduling to the end of its last kernel. Returns: A 'StepStatsAnalysis' object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:analyze_step_stats arg:self arg:show_dataflow arg:show_memory arg:op_time arguments arg arg arg arg Call Call Call Call Call If Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "scale_for_robust_loss_function",
    "source_code": "def scale_for_robust_loss_function(J, f, rho):\n    J_scale = rho[1] + 2 * rho[2] * f ** 2\n    J_scale[J_scale < EPS] = EPS\n    J_scale **= 0.5\n    f *= rho[1] / J_scale\n    return (left_multiply(J, J_scale, copy=False), f)",
    "docstring": "Scale Jacobian and residuals for a robust loss function. Arrays are modified in place.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\common.py",
    "ast_data": "FunctionDef name:scale_for_robust_loss_function arg:J arg:f arg:rho arguments arg arg arg Assign Assign Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_TFRecordDataset",
    "source_code": "class _TFRecordDataset(dataset_ops.DatasetSource):\n\n    def __init__(self, filenames, compression_type=None, buffer_size=None, name=None):\n        self._filenames = filenames\n        self._compression_type = convert.optional_param_to_tensor('compression_type', compression_type, argument_default='', argument_dtype=dtypes.string)\n        self._buffer_size = convert.optional_param_to_tensor('buffer_size', buffer_size, argument_default=_DEFAULT_TF_RECORD_BUFFER_SIZE_BYTES)\n        self._name = name\n        variant_tensor = gen_dataset_ops.tf_record_dataset(self._filenames, self._compression_type, self._buffer_size, metadata=self._metadata.SerializeToString())\n        super(_TFRecordDataset, self).__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return tensor_spec.TensorSpec([], dtypes.string)",
    "docstring": "A comprising records from one or more TFRecord files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "ClassDef name:_TFRecordDataset FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:name arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Call Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "codegen_node",
    "source_code": "def codegen_node(self, node: Union[OuterLoopFusedSchedulerNode, FusedSchedulerNode, SchedulerNode]):\n    kernel_group = self.kernel_group\n    if isinstance(node, OuterLoopFusedSchedulerNode):\n        self.codegen_outer_loop_node(node)\n    else:\n        nodes: list[SchedulerNode] = node.get_nodes()\n        nodes = self.try_loop_split(nodes)\n        cpp_kernel_proxy = CppKernelProxy(kernel_group)\n        cpp_kernel_proxy.codegen_nodes(nodes)\n        kernel_group.finalize_kernel(cpp_kernel_proxy, nodes)\n    args_num = self._get_scheduled_num_args()\n    if args_num > CppScheduling.MAX_FUSED_KERNEL_ARGS_NUM:\n        self._set_flush_status(True)",
    "docstring": "Turn an set of pre-fused nodes into a C++ kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:codegen_node arg:self arg:node arguments arg arg Assign If Call Call Call Assign Call Assign Call Call Call Assign Call If Compare Call"
  },
  {
    "library": "django",
    "name": "ReverseManyToOneDescriptor",
    "source_code": "class ReverseManyToOneDescriptor:\n\n    def __init__(self, rel):\n        self.rel = rel\n        self.field = rel.field\n\n    @cached_property\n    def related_manager_cls(self):\n        related_model = self.rel.related_model\n        return create_reverse_many_to_one_manager(related_model._default_manager.__class__, self.rel)\n\n    def __get__(self, instance, cls=None):\n        if instance is None:\n            return self\n        return self.related_manager_cls(instance)\n\n    def _get_set_deprecation_msg_params(self):\n        return ('reverse side of a related set', self.rel.accessor_name)\n\n    def __set__(self, instance, value):\n        raise TypeError('Direct assignment to the %s is prohibited. Use %s.set() instead.' % self._get_set_deprecation_msg_params())",
    "docstring": "Accessor to the related objects manager on the reverse side of a many-to-one relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') `` defined below.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "ClassDef name:ReverseManyToOneDescriptor FunctionDef name:__init__ arg:self arg:rel arguments arg arg Assign Assign FunctionDef name:related_manager_cls arg:self arguments arg Assign Return return:yes Call FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Return return:yes Call FunctionDef name:_get_set_deprecation_msg_params arg:self arguments arg Return return:yes FunctionDef name:__set__ arg:self arg:instance arg:value arguments arg arg arg Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "_attach_methods",
    "source_code": "def _attach_methods(self):\n    raise NotImplementedError",
    "docstring": "Attaches dynamically created methods to the rv_* instance. This method must be overridden by subclasses, and must itself call _attach_argparser_methods. This method is called in __init__ in subclasses, and in __setstate__",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_attach_methods arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, pattern: Graph, match_output: bool=False, match_placeholder: bool=False, remove_overlapping_matches: bool=True, ignore_literals: bool=False) -> None:\n    self.pattern = pattern\n    self.match_output = match_output\n    self.match_placeholder = match_placeholder\n    self.remove_overlapping_matches = remove_overlapping_matches\n    self.ignore_literals = ignore_literals\n    if len(pattern.nodes) == 0:\n        raise ValueError('SubgraphMatcher cannot be initialized with an empty pattern')\n    for node in pattern.nodes:\n        if node.op != 'output':\n            assert len(node.users) > 0, 'SubgraphMatcher cannot be initialized with an pattern with dead code'\n    self.pattern_placeholder_nodes = [n for n in pattern.nodes if n.op == 'placeholder']\n    output_node = next(iter(reversed(pattern.nodes)))\n    self.pattern_returning_nodes: list[Node] = output_node.all_input_nodes\n    self.pattern_anchors: list[Node] = []\n    if match_output:\n        self.pattern_anchors = [output_node]\n    else:\n        self.pattern_anchors = [n for n in output_node.all_input_nodes if len(n.users) == 1]",
    "docstring": "Args: pattern: the targeted matching pattern, represented in fx.Graph. match_output: If True, output node in the pattern graph will be treated as a part of the targeted pattern. If False, output node is ignored during match. match_placeholder: If True, placeholder node in the pattern graph will be treated as a part of the targeted pattern. If False, placeholder nodes will be used a wildcard. remove_overlapping_matches: If True, in the case of overlapping matches, only the first match will be returned. ignore_literals: If True, will not check if literals are equal and will instead treat them as wildcards.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\utils\\matcher_utils.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pattern arg:match_output arg:match_placeholder arg:remove_overlapping_matches arg:ignore_literals arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign If Compare Call Raise Call For If Compare Compare Call Assign Compare Assign Call Call Call If Assign Assign Compare Call"
  },
  {
    "library": "pandas",
    "name": "wrap_results_for_axis",
    "source_code": "def wrap_results_for_axis(self, results: ResType, res_index: Index) -> DataFrame | Series:\n    result: DataFrame | Series\n    if self.result_type == 'expand':\n        result = self.infer_to_same_shape(results, res_index)\n    elif not isinstance(results[0], ABCSeries):\n        result = self.obj._constructor_sliced(results)\n        result.index = res_index\n    else:\n        result = self.infer_to_same_shape(results, res_index)\n    return result",
    "docstring": "return the results for the columns",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:wrap_results_for_axis arg:self arg:results arg:res_index arguments arg arg arg If Compare Assign Call If Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__create_chunk_list__",
    "source_code": "def __create_chunk_list__(self):\n    from torch.distributed.checkpoint.planner_helpers import _create_chunk_from_dtensor\n    if hasattr(self._local_tensor, '__create_chunk_list__'):\n        return self._local_tensor.__create_chunk_list__()\n    elif isinstance(self._local_tensor, torch.Tensor):\n        return [_create_chunk_from_dtensor(self)]\n    else:\n        raise RuntimeError('Unsupported tensor type!')",
    "docstring": "Return a list of ChunkStorageMetadata, which is a dataclass that describes the size/offset of the local shard/replica on current rank. For DTensor, each rank will have a single local shard/replica, so the returned list usually only has one element. This dunder method is primariy used for distributed checkpoint purpose. Returns: A List[:class:] object that represents the shard size/offset on the current rank.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:__create_chunk_list__ arg:self arguments arg If Call Return return:yes Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "figure",
    "source_code": "@property\ndef figure(self):\n    return self._figure",
    "docstring": "Figure that holds the canvas.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:figure arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_enum_to_json",
    "source_code": "@classmethod\ndef _enum_to_json(cls, enum_value):\n    if enum_value is None:\n        return None\n    assert isinstance(enum_value, enum.Enum)\n    return {'type': enum_value.__class__.__name__, 'name': enum_value.name}",
    "docstring": "Convert enum value to JSON dict. Args: enum_value: Enum value Returns: dict: Dictionary representation",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py",
    "ast_data": "FunctionDef name:_enum_to_json arg:cls arg:enum_value arguments arg arg If Compare Return return:no Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "reorder_columns",
    "source_code": "def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]:\n    names: list[Hashable] = []\n    seen_names: set[Hashable] = set()\n    ldesc_indexes = sorted((x.index for x in ldesc), key=len)\n    for idxnames in ldesc_indexes:\n        for name in idxnames:\n            if name not in seen_names:\n                seen_names.add(name)\n                names.append(name)\n    return names",
    "docstring": "Set a convenient order for rows for display.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\methods\\describe.py",
    "ast_data": "FunctionDef name:reorder_columns arg:ldesc arguments arg Call Assign Call For For If Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "truediv",
    "source_code": "def truediv(self, other, level=None, fill_value=None, axis: Axis=0) -> Series:\n    return self._flex_method(other, operator.truediv, level=level, fill_value=fill_value, axis=axis)",
    "docstring": "Return Floating division of series and other, element-wise (binary operator ). Equivalent to `Python documentation `_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=[\"a\", \"b\", \"c\", \"d\"]) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=[\"a\", \"b\", \"d\", \"e\"]) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.divide(b, fill_value=0) a 1.0 b inf c inf d 0.0 e NaN dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:truediv arg:self arg:other arg:level arg:fill_value arg:axis arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "BatchableExtensionTypeSpec",
    "source_code": "class BatchableExtensionTypeSpec(ExtensionTypeSpec, type_spec.BatchableTypeSpec):\n    __batch_encoder__ = ExtensionTypeBatchEncoder()\n\n    def _batch(self, batch_size):\n        return self.__batch_encoder__.batch(self, batch_size)\n\n    def _unbatch(self):\n        return self.__batch_encoder__.unbatch(self)\n\n    def _to_tensor_list(self, value):\n        return type_spec.batchable_to_tensor_list(self, value)\n\n    def _to_batched_tensor_list(self, value):\n        return type_spec.batchable_to_tensor_list(self, value, minimum_rank=1)\n\n    def _from_compatible_tensor_list(self, tensor_list):\n        return type_spec.batchable_from_tensor_list(self, tensor_list)\n\n    @property\n    def _flat_tensor_specs(self):\n        return type_spec.get_batchable_flat_tensor_specs(self)",
    "docstring": "Base class for TypeSpecs for BatchableExtensionTypes.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "ClassDef name:BatchableExtensionTypeSpec Assign Call FunctionDef name:_batch arg:self arg:batch_size arguments arg arg Return return:yes Call FunctionDef name:_unbatch arg:self arguments arg Return return:yes Call FunctionDef name:_to_tensor_list arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:_to_batched_tensor_list arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:_from_compatible_tensor_list arg:self arg:tensor_list arguments arg arg Return return:yes Call FunctionDef name:_flat_tensor_specs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "createDimension",
    "source_code": "def createDimension(self, name, length):\n    if length is None and self._dims:\n        raise ValueError('Only first dimension may be unlimited!')\n    self.dimensions[name] = length\n    self._dims.append(name)",
    "docstring": "Adds a dimension to the Dimension section of the NetCDF data structure. Note that this function merely adds a new dimension that the variables can reference. The values for the dimension, if desired, should be added as a variable using , referring to this dimension. Parameters ---------- name : str Name of the dimension (Eg, 'lat' or 'time'). length : int Length of the dimension. See Also -------- createVariable",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:createDimension arg:self arg:name arg:length arguments arg arg arg If BoolOp Compare Raise Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "confidence_interval",
    "source_code": "def confidence_interval(self, confidence_level: DecimalNumber=0.95) -> ConfidenceInterval:\n    if self._ci is not None and confidence_level == self._ci_cl:\n        return self._ci\n    if not 0 < confidence_level < 1:\n        raise ValueError('Confidence level must be between 0 and 1.')\n    allowance = self._allowance(confidence_level=confidence_level)\n    diff_means = self._mean_samples - self._mean_control\n    low = diff_means - allowance\n    high = diff_means + allowance\n    if self._alternative == 'greater':\n        high = [np.inf] * len(diff_means)\n    elif self._alternative == 'less':\n        low = [-np.inf] * len(diff_means)\n    self._ci_cl = confidence_level\n    self._ci = ConfidenceInterval(low=low, high=high)\n    return self._ci",
    "docstring": "Compute the confidence interval for the specified confidence level. Parameters ---------- confidence_level : float, optional Confidence level for the computed confidence interval. Default is .95. Returns ------- ci : ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multicomp.py",
    "ast_data": "FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg If BoolOp Compare Compare Return return:yes If Compare Raise Call Assign Call Assign Assign Assign If Compare Assign Call If Compare Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_init_uniform_ray_dataset",
    "source_code": "def _init_uniform_ray_dataset(self) -> None:\n    self._ray_sampler = UniformRaySampler(self._min_depth, self._max_depth, self._ndc, device=self._device, dtype=self._dtype)\n    self._ray_sampler.calc_ray_params(self._cameras)",
    "docstring": "Initialize a uniform ray sampler and calculates dataset ray parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\data_utils.py",
    "ast_data": "FunctionDef name:_init_uniform_ray_dataset arg:self arguments arg Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "copy",
    "source_code": "def copy(self):\n    rccopy = RcParams()\n    for k in self:\n        rccopy._set(k, self._get(k))\n    return rccopy",
    "docstring": "Copy this RcParams instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Assign Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "getfullargspec",
    "source_code": "def getfullargspec(obj):\n    decorators, target = tf_decorator.unwrap(obj)\n    for d in decorators:\n        if d.decorator_argspec is not None:\n            return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)\n    return _getfullargspec(target)",
    "docstring": "TFDecorator-aware replacement for . This wrapper emulates in[^)]* Python2. Args: obj: A callable, possibly decorated. Returns: The that describes the signature of the outermost decorator that changes the callable's signature. If the callable is not decorated, will be called directly on the callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getfullargspec arg:obj arguments arg Assign Call For If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "to_mlab_linkage",
    "source_code": "@xp_capabilities()\ndef to_mlab_linkage(Z):\n    xp = array_namespace(Z)\n    Z = _asarray(Z, dtype=xp.float64, xp=xp)\n    if Z.shape in ((), (0,)):\n        return xp_copy(Z, xp=xp)\n    _is_valid_linkage(Z, throw=True, name='Z', xp=xp)\n    return xp.concat((Z[:, :2] + 1.0, Z[:, 2:3]), axis=1)",
    "docstring": "Convert a linkage matrix to a MATLAB(TM) compatible one. Converts a linkage matrix `scipy.cluster.hierarchy.to_mlab_linkage` uses 1-indexing for all the clusters (instead of 0-indexing). Also, the last column of the original linkage matrix has been dropped.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:to_mlab_linkage arg:Z arguments arg Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_convert_level_number",
    "source_code": "def _convert_level_number(level_num: int, columns: Index):\n    if level_num in columns.names:\n        return columns.names[level_num]\n    return level_num",
    "docstring": "Logic for converting the level number to something we can safely pass to swaplevel. If matches a column name return the name from position , otherwise return .",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\reshape.py",
    "ast_data": "FunctionDef name:_convert_level_number arg:level_num arg:columns arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset_peak_host_memory_stats",
    "source_code": "def reset_peak_host_memory_stats() -> None:\n    return torch._C._cuda_resetPeakHostMemoryStats()",
    "docstring": "Reset the \"peak\" stats tracked by the host memory allocator. See :func: for details. Peak stats correspond to the key in each individual stat dict.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:reset_peak_host_memory_stats arguments Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "accepts",
    "source_code": "def accepts(self, item: Any) -> bool:\n    if self.item_classes:\n        return isinstance(item, self.item_classes)\n    return True",
    "docstring": "Return `itemScrapy items TrueFalse` otherwise :rtype: bool",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:accepts arg:self arg:item arguments arg arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "FloatingDtype",
    "source_code": "class FloatingDtype(NumericDtype):\n    _internal_fill_value = np.nan\n    _default_np_dtype = np.dtype(np.float64)\n    _checker = is_float_dtype\n\n    @classmethod\n    def construct_array_type(cls) -> type[FloatingArray]:\n        return FloatingArray\n\n    @classmethod\n    def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]:\n        return NUMPY_FLOAT_TO_DTYPE\n\n    @classmethod\n    def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:\n        return values.astype(dtype, copy=copy)",
    "docstring": "An ExtensionDtype to hold a single size of floating dtype. These specific implementations are subclasses of the non-public FloatingDtype. For example we have Float32Dtype to represent float32. The attributes name & type are set when these subclasses are created.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\arrays\\floating.py",
    "ast_data": "ClassDef name:FloatingDtype Assign Assign Call Assign FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes FunctionDef name:_get_dtype_mapping arg:cls arguments arg Return return:yes FunctionDef name:_safe_cast arg:cls arg:values arg:dtype arg:copy arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_conv",
    "source_code": "def _matrix_conv(self, m1, m2):\n    n = m1[0, 0, 0].shape.as_list()[0]\n    if n != m2[0, 0, 0].shape.as_list()[0]:\n        raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0, 0].shape={m1[0, 0, 0].shape} and m2[0, 0, 0].shape={m2[0, 0, 0].shape}.')\n    k = int(np.cbrt(len(m1)))\n    l = int(np.cbrt(len(m2)))\n    result = {}\n    size = k + l - 1\n    for i in range(size):\n        for j in range(size):\n            for r in range(size):\n                result[i, j, r] = array_ops.zeros([n, n], self.dtype)\n                for index1 in range(min(k, i + 1)):\n                    for index2 in range(min(k, j + 1)):\n                        for index3 in range(min(k, r + 1)):\n                            if i - index1 < l and j - index2 < l and (r - index3 < l):\n                                result[i, j, r] += math_ops.matmul(m1[index1, index2, index3], m2[i - index1, j - index2, r - index3])\n    return result",
    "docstring": "Matrix convolution. Args: m1: is a k x k x k dictionary, each element is a n x n matrix. m2: is a l x l x l dictionary, each element is a n x n matrix. Returns: (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each element is a n x n matrix. Raises: ValueError: if the entries of m1 and m2 are of different dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_matrix_conv arg:self arg:m1 arg:m2 arguments arg arg arg Assign Call If Compare Call Raise Call Assign Call Call Call Assign Call Call Call Assign Assign For Call For Call For Call Assign Call For Call Call For Call Call For Call Call If BoolOp Compare Compare Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "fake_tensor_prop",
    "source_code": "def fake_tensor_prop(gm: GraphModule, example_inputs: Sequence[InputType], force_allow_non_fake_inputs: bool=False) -> torch._subclasses.FakeTensorMode:\n    with enable_python_dispatcher():\n        fake_mode = detect_fake_mode(example_inputs)\n        if not fake_mode:\n            fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)\n            FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)\n        else:\n            ctx = contextlib.nullcontext() if not force_allow_non_fake_inputs else mock.patch.object(fake_mode, 'allow_non_fake_inputs', True)\n            with ctx:\n                FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(*example_inputs)\n    return fake_mode",
    "docstring": "If we can not detect fake mode from the context of inputs, create one. The created fake mode will be returned.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:fake_tensor_prop arg:gm arg:example_inputs arg:force_allow_non_fake_inputs arguments arg arg arg With Call Assign Call If Assign Call Call Call Assign Call Call With Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_leaf_sorter",
    "source_code": "def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]:\n    if labels[0].size == 0:\n        return np.empty(0, dtype=np.intp)\n    if len(labels) == 1:\n        return get_group_index_sorter(ensure_platform_int(labels[0]))\n    tic = labels[0][:-1] != labels[0][1:]\n    for lab in labels[1:-1]:\n        tic |= lab[:-1] != lab[1:]\n    starts = np.hstack(([True], tic, [True])).nonzero()[0]\n    lab = ensure_int64(labels[-1])\n    return lib.get_level_sorter(lab, ensure_platform_int(starts))",
    "docstring": "Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_leaf_sorter arg:labels arguments arg If Compare Return return:yes Call If Compare Call Return return:yes Call Call Assign Compare For Compare Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "make_nonnegative",
    "source_code": "def make_nonnegative(X, min_value=0):\n    min_ = X.min()\n    if min_ < min_value:\n        if sparse.issparse(X):\n            raise ValueError('Cannot make the data matrix nonnegative because it is sparse. Adding a value to every entry would make it no longer sparse.')\n        X = X + (min_value - min_)\n    return X",
    "docstring": "Ensure >= . Parameters ---------- X : array-like The matrix to make non-negative. min_value : float, default=0 The threshold value. Returns ------- array-like The thresholded array. Raises ------ ValueError When X is sparse.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:make_nonnegative arg:X arg:min_value arguments arg arg Assign Call If Compare If Call Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name_scope",
    "source_code": "@property\ndef name_scope(self):\n    if tf2.enabled():\n        return self._name_scope\n    else:\n        return ops.name_scope(self._scope_name, skip_on_eager=False)",
    "docstring": "Returns a instance for this class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\module\\module.py",
    "ast_data": "FunctionDef name:name_scope arg:self arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_dump_tensor",
    "source_code": "def _should_dump_tensor(self, op_type, dtype):\n    should_dump = True\n    if self._op_regex:\n        should_dump = should_dump and re.match(self._op_regex, op_type)\n    if self._tensor_dtypes:\n        if isinstance(self._tensor_dtypes, (list, tuple)):\n            should_dump = should_dump and any((dtype == dtype_item for dtype_item in self._tensor_dtypes))\n        else:\n            should_dump = should_dump and self._tensor_dtypes(dtype)\n    return should_dump",
    "docstring": "Determine if the given tensor's value will be dumped. The determination is made given the configurations such as , . Args: op_type: Name of the op's type, as a string (e.g., \"MatMul\"). dtype: The dtype of the tensor, as a object. Returns: A bool indicating whether the tensor's value will be dumped.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py",
    "ast_data": "FunctionDef name:_should_dump_tensor arg:self arg:op_type arg:dtype arguments arg arg arg Assign If Assign BoolOp Call If If Call Assign BoolOp Call Compare Assign BoolOp Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "preserve_original_messages",
    "source_code": "def preserve_original_messages(self) -> None:\n    raise NotImplementedError",
    "docstring": "Preserve original translatable messages.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "FunctionDef name:preserve_original_messages arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "merge_masks",
    "source_code": "def merge_masks(self, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor], query: Tensor) -> tuple[Optional[Tensor], Optional[int]]:\n    mask_type: Optional[int] = None\n    merged_mask: Optional[Tensor] = None\n    if key_padding_mask is not None:\n        mask_type = 1\n        merged_mask = key_padding_mask\n    if attn_mask is not None:\n        batch_size, seq_len, _ = query.shape\n        mask_type = 2\n        if attn_mask.dim() == 3:\n            attn_mask_expanded = attn_mask.view(batch_size, -1, seq_len, seq_len)\n        else:\n            attn_mask_expanded = attn_mask.view(1, 1, seq_len, seq_len).expand(batch_size, self.num_heads, -1, -1)\n        merged_mask = attn_mask_expanded\n        if key_padding_mask is not None:\n            key_padding_mask_expanded = key_padding_mask.view(batch_size, 1, 1, seq_len).expand(-1, self.num_heads, -1, -1)\n            merged_mask = attn_mask_expanded + key_padding_mask_expanded\n    return (merged_mask, mask_type)",
    "docstring": "Determine mask type and combine masks if necessary. If only one mask is provided, that mask and the corresponding mask type will be returned. If both masks are provided, they will be both expanded to shape `` Returns: merged_mask: merged mask mask_type: merged mask type (0, 1, or 2)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "FunctionDef name:merge_masks arg:self arg:attn_mask arg:key_padding_mask arg:query arguments arg arg arg arg If Compare Assign Assign If Compare Assign Assign If Compare Call Assign Call Assign Call Call Assign If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    Y = np.array([_predict_binary(e, X) for e in self.estimators_], order='F', dtype=np.float64).T\n    pred = pairwise_distances_argmin(Y, self.code_book_, metric='euclidean')\n    return self.classes_[pred]",
    "docstring": "Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : ndarray of shape (n_samples,) Predicted multi-class targets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_can_hold_identifiers_and_holds_name",
    "source_code": "@final\ndef _can_hold_identifiers_and_holds_name(self, name) -> bool:\n    if is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or isinstance(self.dtype, CategoricalDtype):\n        return name in self\n    return False",
    "docstring": "Faster check for `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_can_hold_identifiers_and_holds_name arg:self arg:name arguments arg arg If BoolOp Call Call Call Return return:yes Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_update_run_calls_state",
    "source_code": "def _update_run_calls_state(self, run_call_count, fetches, feed_dict, is_callable_runner=False):\n    self._run_call_count = run_call_count\n    self._feed_dict = feed_dict\n    self._run_description = cli_shared.get_run_short_description(run_call_count, fetches, feed_dict, is_callable_runner=is_callable_runner)\n    self._run_through_times -= 1\n    self._run_info = cli_shared.get_run_start_intro(run_call_count, fetches, feed_dict, self._tensor_filters, is_callable_runner=is_callable_runner)",
    "docstring": "Update the internal state with regard to run() call history. Args: run_call_count: (int) Number of run() calls that have occurred. fetches: a node/tensor or a list of node/tensor that are the fetches of the run() call. This is the same as the fetches argument to the run() call. feed_dict: None of a dict. This is the feed_dict argument to the run() call. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py",
    "ast_data": "FunctionDef name:_update_run_calls_state arg:self arg:run_call_count arg:fetches arg:feed_dict arg:is_callable_runner arguments arg arg arg arg arg Assign Assign Assign Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "_fix_real_lt_zero",
    "source_code": "def _fix_real_lt_zero(x):\n    x = asarray(x)\n    if any(isreal(x) & (x < 0)):\n        x = _tocomplex(x)\n    return x",
    "docstring": "Convert to complex if it has real, negative components. Otherwise, output is just the array version of the input (via asarray). Parameters ---------- x : array_like Returns ------- array Examples -------- >>> import numpy as np >>> np.lib.scimath._fix_real_lt_zero([1,2]) array([1, 2]) >>> np.lib.scimath._fix_real_lt_zero([-1,2]) array([-1.+0.j, 2.+0.j])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:_fix_real_lt_zero arg:x arguments arg Assign Call If Call Call Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "parse_loop_header",
    "source_code": "def parse_loop_header(loophead):\n    loophead = stripast.sub('', loophead)\n    names = []\n    reps = named_re.findall(loophead)\n    nsub = None\n    for rep in reps:\n        name = rep[0]\n        vals = parse_values(rep[1])\n        size = len(vals)\n        if nsub is None:\n            nsub = size\n        elif nsub != size:\n            msg = 'Mismatch in number of values, %d != %d\\n%s = %s'\n            raise ValueError(msg % (nsub, size, name, vals))\n        names.append((name, vals))\n    excludes = []\n    for obj in exclude_re.finditer(loophead):\n        span = obj.span()\n        endline = loophead.find('\\n', span[1])\n        substr = loophead[span[1]:endline]\n        ex_names = exclude_vars_re.findall(substr)\n        excludes.append(dict(ex_names))\n    dlist = []\n    if nsub is None:\n        raise ValueError('No substitution variables found')\n    for i in range(nsub):\n        tmp = {name: vals[i] for name, vals in names}\n        dlist.append(tmp)\n    return dlist",
    "docstring": "Find all named replacements in the header Returns a list of dictionaries, one for each loop iteration, where each key is a name to be substituted and the corresponding value is the replacement string. Also return a list of exclusions. The exclusions are dictionaries of key value pairs. There can be more than one exclusion. [{'var1':'value1', 'var2', 'value2'[,...]}, ...]",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\conv_template.py",
    "ast_data": "FunctionDef name:parse_loop_header arg:loophead arguments arg Assign Call Assign Assign Call Assign For Assign Assign Call Assign Call If Compare Assign If Compare Assign Raise Call Call Assign For Call Assign Call Assign Call Assign Assign Call Call Call Assign If Compare Raise Call For Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_format_ticks",
    "source_code": "def _format_ticks(self, idx, direction, factor, levels):\n    fmt = _api.check_getitem({1: self.tick_formatter1, 2: self.tick_formatter2}, idx=idx)\n    return fmt.format_ticks(levels) if isinstance(fmt, mticker.Formatter) else fmt(direction, factor, levels)",
    "docstring": "Helper to support both standard formatters (inheriting from ) and axisartist-specific ones; should be called instead of directly calling ``. This method should be considered as a temporary workaround which will be removed in the future at the same time as axisartist-specific formatters.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "FunctionDef name:_format_ticks arg:self arg:idx arg:direction arg:factor arg:levels arguments arg arg arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "clean",
    "source_code": "def clean(self):\n    return self.cleaned_data",
    "docstring": "Hook for doing any extra form-wide cleaning after Field.clean() has been called on every field. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field named '__all__'.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:clean arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(name):\n    if not isinstance(name, str):\n        raise TypeError('Expected `name` to be a string; got %r' % (name,))\n    if name not in _NAME_TO_TYPE_SPEC:\n        raise ValueError('No TypeSpec has been registered with name %r' % (name,))\n    return _NAME_TO_TYPE_SPEC[name]",
    "docstring": "Returns the TypeSpec that has been registered with name .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec_registry.py",
    "ast_data": "FunctionDef name:lookup arg:name arguments arg If Call Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "exit_loop_section",
    "source_code": "def exit_loop_section(self, section_id):\n    self._connect_nodes(self.leaves, self.section_entry[section_id])\n    for reentry in self.continues[section_id]:\n        guard_ends = self._connect_jump_to_finally_sections(reentry)\n        self._connect_nodes(guard_ends, self.section_entry[section_id])\n    self.leaves = set((self.section_entry[section_id],))\n    del self.continues[section_id]\n    del self.section_entry[section_id]",
    "docstring": "Exits a loop section.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:exit_loop_section arg:self arg:section_id arguments arg arg Call For Assign Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "SimpleLineShadow",
    "source_code": "class SimpleLineShadow(AbstractPathEffect):\n\n    def __init__(self, offset=(2, -2), shadow_color='k', alpha=0.3, rho=0.3, **kwargs):\n        super().__init__(offset)\n        if shadow_color is None:\n            self._shadow_color = shadow_color\n        else:\n            self._shadow_color = mcolors.to_rgba(shadow_color)\n        self._alpha = alpha\n        self._rho = rho\n        self._gc = kwargs\n\n    def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n        gc0 = renderer.new_gc()\n        gc0.copy_properties(gc)\n        if self._shadow_color is None:\n            r, g, b = (gc0.get_foreground() or (1.0, 1.0, 1.0))[:3]\n            shadow_rgbFace = (r * self._rho, g * self._rho, b * self._rho)\n        else:\n            shadow_rgbFace = self._shadow_color\n        gc0.set_foreground(shadow_rgbFace)\n        gc0.set_alpha(self._alpha)\n        gc0 = self._update_gc(gc0, self._gc)\n        renderer.draw_path(gc0, tpath, affine + self._offset_transform(renderer))\n        gc0.restore()",
    "docstring": "A simple shadow via a line.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "ClassDef name:SimpleLineShadow FunctionDef name:__init__ arg:self arg:offset arg:shadow_color arg:alpha arg:rho arguments arg arg arg arg arg arg Call Call If Compare Assign Assign Call Assign Assign Assign FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call If Compare Assign BoolOp Call Assign Assign Call Call Assign Call Call Call Call"
  },
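A plausible usage sketch for the path effect above; `Normal()` re-draws the artist itself on top of the shadow:

```python
import matplotlib.pyplot as plt
from matplotlib.patheffects import Normal, SimpleLineShadow

fig, ax = plt.subplots()
# The shadow is drawn first (offset, semi-transparent), then the line.
ax.plot([0, 1, 2], [0, 1, 0], linewidth=3,
        path_effects=[SimpleLineShadow(offset=(2, -2), alpha=0.3), Normal()])
fig.savefig("shadow.png")
```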
  {
    "library": "tensorflow",
    "name": "batch_gather",
    "source_code": "@tf_export(v1=['batch_gather'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated('2017-10-25', '`tf.batch_gather` is deprecated, please use `tf.gather` with `batch_dims=tf.rank(indices) - 1` instead.')\ndef batch_gather(params, indices, name=None):\n    with ops.name_scope(name, 'BatchGather', [params, indices]):\n        indices = ops.convert_to_tensor(indices, name='indices')\n        params = ops.convert_to_tensor(params, name='params')\n        if indices.shape.ndims is None:\n            raise ValueError('batch_gather does not allow indices with unknown shape.')\n        return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)",
    "docstring": "Gather slices from params according to indices with leading batch dims.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:batch_gather arg:params arg:indices arg:name arguments arg arg arg With Call Assign Call Assign Call If Compare Raise Call Return return:yes Call Call Call"
  },
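The replacement suggested by the deprecation notice, sketched with a static `batch_dims` (here `rank(indices) - 1 == 1`):

```python
import tensorflow as tf

params = tf.constant([[10, 11, 12], [20, 21, 22]])
indices = tf.constant([[2, 0], [1, 1]])
# Equivalent to the deprecated tf.batch_gather(params, indices).
out = tf.gather(params, indices, batch_dims=1)
# out -> [[12, 10], [21, 21]]
```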
  {
    "library": "tensorflow",
    "name": "_get_node_dependencies",
    "source_code": "def _get_node_dependencies(self, proto):\n    dependencies = {ref.local_name: ref.node_id for ref in proto.dependencies}\n    kind = proto.WhichOneof('kind')\n    if kind == 'function':\n        concrete_functions = proto.function.concrete_functions\n        for fn_name in concrete_functions:\n            for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:\n                dependencies[bound_input] = bound_input\n    elif kind == 'bare_concrete_function':\n        fn_name = proto.bare_concrete_function.concrete_function_name\n        for bound_input in self._proto.concrete_functions[fn_name].bound_inputs:\n            dependencies[bound_input] = bound_input\n    elif kind == 'resource':\n        for child in proto.children:\n            if child.local_name == '_create_resource':\n                dependencies['_create_resource'] = child.node_id\n    return dependencies",
    "docstring": "Returns a dictionary of all dependencies of an object. Args: proto: A SavedObject proto. Returns: Dict mapping string dependency name *or* int node id to the node id. The int node id key is used for mapping function captures.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_get_node_dependencies arg:self arg:proto arguments arg arg Assign Assign Call If Compare Assign For For Assign If Compare Assign For Assign If Compare For If Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "apply_raw",
    "source_code": "def apply_raw(self, engine='python', engine_kwargs=None):\n\n    def wrap_function(func):\n\n        def wrapper(*args, **kwargs):\n            result = func(*args, **kwargs)\n            if isinstance(result, str):\n                result = np.array(result, dtype=object)\n            return result\n        return wrapper\n    if engine == 'numba':\n        args, kwargs = prepare_function_arguments(self.func, self.args, self.kwargs, num_required_args=1)\n        nb_looper = generate_apply_looper(self.func, **get_jit_arguments(engine_kwargs))\n        result = nb_looper(self.values, self.axis, *args)\n        result = np.squeeze(result)\n    else:\n        result = np.apply_along_axis(wrap_function(self.func), self.axis, self.values, *self.args, **self.kwargs)\n    if result.ndim == 2:\n        return self.obj._constructor(result, index=self.index, columns=self.columns)\n    else:\n        return self.obj._constructor_sliced(result, index=self.agg_axis)",
    "docstring": "apply to the values as a numpy array",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:apply_raw arg:self arg:engine arg:engine_kwargs arguments arg arg arg FunctionDef name:wrap_function arg:func arguments arg FunctionDef name:wrapper arguments arg arg Assign Call If Call Assign Call Return return:yes Return return:yes If Compare Assign Call Assign Call Call Assign Call Assign Call Assign Call Call If Compare Return return:yes Call Return return:yes Call"
  },
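`apply_raw` backs `DataFrame.apply(..., raw=True)`; a short usage sketch:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
# raw=True hands each row to the function as a bare ndarray rather than
# a Series, which is the path implemented by apply_raw above.
row_sums = df.apply(np.sum, axis=1, raw=True)
print(row_sums.tolist())  # [5, 7, 9]
```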
  {
    "library": "scikit-learn",
    "name": "_find_binning_thresholds",
    "source_code": "def _find_binning_thresholds(col_data, max_bins):\n    missing_mask = np.isnan(col_data)\n    if missing_mask.any():\n        col_data = col_data[~missing_mask]\n    col_data = np.sort(col_data)\n    distinct_values = np.unique(col_data).astype(X_DTYPE)\n    if len(distinct_values) <= max_bins:\n        midpoints = distinct_values[:-1] + distinct_values[1:]\n        midpoints *= 0.5\n    else:\n        percentiles = np.linspace(0, 100, num=max_bins + 1)\n        percentiles = percentiles[1:-1]\n        midpoints = np.percentile(col_data, percentiles, method='midpoint').astype(X_DTYPE)\n        assert midpoints.shape[0] == max_bins - 1\n    np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)\n    return midpoints",
    "docstring": "Extract quantiles from a continuous feature. Missing values are ignored for finding the thresholds. Parameters ---------- col_data : array-like, shape (n_samples,) The continuous feature to bin. max_bins: int The maximum number of bins to use for non-missing values. If for a given feature the number of unique values is less than ``, then those unique values will be used to compute the bin thresholds, instead of the quantiles Return ------ binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,) The increasing numeric values that can be used to separate the bins. A given value x will be mapped into bin value i iff bining_thresholds[i - 1] < x <= binning_thresholds[i]",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\binning.py",
    "ast_data": "FunctionDef name:_find_binning_thresholds arg:col_data arg:max_bins arguments arg arg Assign Call If Call Assign Assign Call Assign Call Call If Compare Call Assign Assign Call Assign Assign Call Call Compare Call Return return:yes"
  },
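A numpy sketch of the few-distinct-values branch above (the values and shapes are illustrative):

```python
import numpy as np

col = np.array([0.0, 1.0, 1.0, 2.0, np.nan])
col = np.sort(col[~np.isnan(col)])   # missing values are ignored
distinct = np.unique(col)            # [0., 1., 2.]
# With fewer distinct values than max_bins, thresholds are the midpoints
# between consecutive distinct values rather than quantiles.
thresholds = (distinct[:-1] + distinct[1:]) * 0.5
print(thresholds)  # [0.5 1.5]
```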
  {
    "library": "seaborn",
    "name": "build_plot_signature",
    "source_code": "def build_plot_signature(cls):\n    sig = inspect.signature(cls)\n    params = [inspect.Parameter('args', inspect.Parameter.VAR_POSITIONAL), inspect.Parameter('data', inspect.Parameter.KEYWORD_ONLY, default=None)]\n    params.extend([inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None) for name in PROPERTIES])\n    new_sig = sig.replace(parameters=params)\n    cls.__signature__ = new_sig\n    known_properties = textwrap.fill(', '.join([f'|{p}|' for p in PROPERTIES]), width=78, subsequent_indent=' ' * 8)\n    if cls.__doc__ is not None:\n        cls.__doc__ = cls.__doc__.format(known_properties=known_properties)\n    return cls",
    "docstring": "Decorator function for giving Plot a useful signature. Currently this mostly saves us some duplicated typing, but we would like eventually to have a way of registering new semantic properties, at which point dynamic signature generation would become more important.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:build_plot_signature arg:cls arguments arg Assign Call Assign Call Call Call Call Assign Call Assign Assign Call Call If Compare Assign Call Return return:yes"
  },
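A minimal sketch of the same signature-synthesis idea; `make_keyword_only_sig` and `Plotish` are hypothetical names for illustration:

```python
import inspect

def make_keyword_only_sig(cls, names):
    # Hypothetical helper: build (*args, name=None, ...) and attach it,
    # mirroring how build_plot_signature sets cls.__signature__.
    params = [inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL)]
    params += [inspect.Parameter(n, inspect.Parameter.KEYWORD_ONLY, default=None)
               for n in names]
    cls.__signature__ = inspect.Signature(params)
    return cls

class Plotish:
    def __init__(self, *args, **kwargs): ...

Plotish = make_keyword_only_sig(Plotish, ["x", "y", "color"])
print(inspect.signature(Plotish))  # (*args, x=None, y=None, color=None)
```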
  {
    "library": "tensorflow",
    "name": "set_clang_compiler_path_win",
    "source_code": "def set_clang_compiler_path_win(environ_cp):\n    default_clang_path = 'C:/Program Files/LLVM/bin/clang.exe'\n    if not os.path.exists(default_clang_path):\n        default_clang_path = shutil.which('clang') or ''\n    clang_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify the path to clang executable.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found. Note that Clang is nowpreferred compiler. You may use MSVC by removing --config=win_clang')\n    write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)\n    write_to_bazelrc(f'build --repo_env=CC=\"{clang_compiler_path}\"')\n    write_to_bazelrc(f'build --repo_env=BAZEL_COMPILER=\"{clang_compiler_path}\"')\n    return clang_compiler_path",
    "docstring": "Set CLANG_COMPILER_PATH and environment variables. Loop over user prompts for clang path until receiving a valid response. Default is used if no input is given. Set CLANG_COMPILER_PATH and write environment variables CC and BAZEL_COMPILER to .bazelrc. Args: environ_cp: (Dict) copy of the os.environ. Returns: string value for clang_compiler_path.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:set_clang_compiler_path_win arg:environ_cp arguments arg Assign If Call Assign BoolOp Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "GeometryAwareDescriptorMatcher",
    "source_code": "class GeometryAwareDescriptorMatcher(Module):\n    known_modes: ClassVar[List[str]] = ['fginn', 'adalam']\n\n    def __init__(self, match_mode: str='fginn', params: Optional[Dict[str, Tensor]]=None) -> None:\n        super().__init__()\n        _match_mode: str = match_mode.lower()\n        if _match_mode not in self.known_modes:\n            raise NotImplementedError(f'{match_mode} is not supported. Try one of {self.known_modes}')\n        self.match_mode = _match_mode\n        self.params = params or {}\n\n    def forward(self, desc1: Tensor, desc2: Tensor, lafs1: Tensor, lafs2: Tensor) -> Tuple[Tensor, Tensor]:\n        if self.match_mode == 'fginn':\n            params = _get_default_fginn_params()\n            params.update(self.params)\n            out = match_fginn(desc1, desc2, lafs1, lafs2, params['th'], params['spatial_th'], params['mutual'])\n        elif self.match_mode == 'adalam':\n            _params = get_adalam_default_config()\n            _params.update(self.params)\n            out = match_adalam(desc1, desc2, lafs1, lafs2, config=_params)\n        else:\n            raise NotImplementedError\n        return out",
    "docstring": "Module version of matching functions. See :func:, :func:, :func: or :func: for more details. Args: match_mode: type of matching, can be . th: threshold on distance ratio, or other quality measure.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\matching.py",
    "ast_data": "ClassDef name:GeometryAwareDescriptorMatcher FunctionDef name:__init__ arg:self arg:match_mode arg:params arguments arg arg arg Call Call Call If Compare Raise Call Assign Assign BoolOp FunctionDef name:forward arg:self arg:desc1 arg:desc2 arg:lafs1 arg:lafs2 arguments arg arg arg arg arg If Compare Assign Call Call Assign Call If Compare Assign Call Call Assign Call Raise Return return:yes"
  },
  {
    "library": "django",
    "name": "all",
    "source_code": "def all(self):\n    return self._chain()",
    "docstring": "Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:all arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_convert_xy_units",
    "source_code": "def _convert_xy_units(self, xy):\n    x = self.convert_xunits(xy[0])\n    y = self.convert_yunits(xy[1])\n    return (x, y)",
    "docstring": "Convert x and y units for a tuple (x, y).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_convert_xy_units arg:self arg:xy arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ApproxTopKGradient",
    "source_code": "@ops.RegisterGradient('ApproxTopK')\ndef _ApproxTopKGradient(op: ops.Operation, grad, _):\n    idx_shape = op.outputs[1].shape\n    lifted_idx_shape = idx_shape + [1]\n    flat_shape_len = functools.reduce(operator.mul, idx_shape)\n    rank = idx_shape.rank\n    reduction_dim = op.get_attr('reduction_dimension')\n    if reduction_dim < 0:\n        reduction_dim = rank + reduction_dim\n\n    def GetLiftedIdx(d):\n        if d == reduction_dim:\n            return array_ops.reshape(op.outputs[1], lifted_idx_shape)\n        iota_len = idx_shape[d]\n        iota_shape = list(itertools.repeat(1, rank + 1))\n        iota_shape[d] = iota_len\n        iota = array_ops.reshape(math_ops.range(iota_len), iota_shape)\n        return array_ops.broadcast_to(iota, lifted_idx_shape)\n    lifted_idx = array_ops.concat(list((GetLiftedIdx(d) for d in range(rank))), axis=rank)\n    flat_idx = array_ops.reshape(lifted_idx, [flat_shape_len, rank])\n    flat_grad = array_ops.reshape(grad, [flat_shape_len])\n    return array_ops.scatter_nd(flat_idx, flat_grad, op.inputs[0].shape)",
    "docstring": "Return the gradients for ApproxTopK. Args: op: The ApproxTopK for which we need to generate gradients. grad: The gradients for backprop. Returns: Scattered gradient based on the top-k indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_ApproxTopKGradient arg:op arg:grad arg:_ arguments arg arg arg Assign Assign Assign Call Assign Assign Call If Compare Assign FunctionDef name:GetLiftedIdx arg:d arguments arg If Compare Return return:yes Call Assign Assign Call Call Assign Assign Call Call Return return:yes Call Assign Call Call Call Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_builtin_function",
    "source_code": "@classmethod\ndef from_builtin_function(cls, builtin_function: types.BuiltinFunctionType) -> OpName:\n    op = builtin_function.__name__\n    module = builtin_function.__module__\n    return cls.from_qualified_name(module + '::' + op)",
    "docstring": "From a builtin function, e.g. operator.add, math.ceil, etc, get the OpName. FX graph uses built-in functions to caculate sympy expression. This function is used to get the OpName from a builtin function. Args: builtin_function (types.BuiltinFunctionType): operator.add, math.ceil, etc. Returns: OpName: _description_",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\registration.py",
    "ast_data": "FunctionDef name:from_builtin_function arg:cls arg:builtin_function arguments arg arg Assign Assign Return return:yes Call"
  },
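What the classmethod computes, shown directly (`math.ceil` is an arbitrary builtin chosen for illustration):

```python
import math

# The qualified name is "<module>::<name>", from which the OpName is parsed.
qualified = f"{math.ceil.__module__}::{math.ceil.__name__}"
print(qualified)  # math::ceil
```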
  {
    "library": "scipy",
    "name": "polynomial_matrix",
    "source_code": "def polynomial_matrix(x, powers, out):\n    for i in range(x.shape[0]):\n        for j in range(powers.shape[0]):\n            out[i, j] = np.prod(x[i] ** powers[j])",
    "docstring": "Evaluate monomials, with exponents from , at .",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:polynomial_matrix arg:x arg:powers arg:out arguments arg arg arg For Call For Call Assign Call"
  },
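A small numpy example of the evaluation above (the points and exponents are illustrative):

```python
import numpy as np

x = np.array([[1.0, 2.0], [3.0, 4.0]])       # (n_points, n_dims)
powers = np.array([[0, 0], [1, 0], [1, 1]])  # (n_monomials, n_dims)
out = np.empty((x.shape[0], powers.shape[0]))
# Same double loop as polynomial_matrix: out[i, j] = prod(x[i] ** powers[j]).
for i in range(x.shape[0]):
    for j in range(powers.shape[0]):
        out[i, j] = np.prod(x[i] ** powers[j])
# out -> [[1., 1., 2.], [1., 3., 12.]]
```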
  {
    "library": "pytorch",
    "name": "add_ephemeral_timeout_increase_for_distributed",
    "source_code": "def add_ephemeral_timeout_increase_for_distributed(time_saved_ns: int) -> int:\n    if not torch.distributed.is_available() or not torch.distributed.is_initialized():\n        return 0\n    increased_timeout_sec = int(time_saved_ns // 1000000000.0)\n    if config.is_fbcode():\n        fudge_factor = torch._utils_internal.justknobs_getval_int('pytorch/remote_cache:ephemeral_timeout_fudge_factor_percentage')\n        log.info('Ephemeral NCCL timeout increase fudge factor %d and original increase value %d', fudge_factor, increased_timeout_sec)\n        increased_timeout_sec += int(increased_timeout_sec * fudge_factor / 100)\n    log.info('Increasing NCCL timeout by %d', increased_timeout_sec)\n    dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs(timedelta(seconds=increased_timeout_sec))\n    return increased_timeout_sec",
    "docstring": "Ephemerally increases the NCCL timeout when compiling for a distributed job Returns amount of seconds increased",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:add_ephemeral_timeout_increase_for_distributed arg:time_saved_ns arguments arg If BoolOp Call Call Return return:yes Assign Call If Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, s, *, textprops=None, multilinebaseline=False):\n    if textprops is None:\n        textprops = {}\n    self._text = mtext.Text(0, 0, s, **textprops)\n    super().__init__()\n    self._children = [self._text]\n    self.offset_transform = mtransforms.Affine2D()\n    self._baseline_transform = mtransforms.Affine2D()\n    self._text.set_transform(self.offset_transform + self._baseline_transform)\n    self._multilinebaseline = multilinebaseline",
    "docstring": "Parameters ---------- s : str The text to be displayed. textprops : dict, default: {} Dictionary of keyword parameters to be passed to the instance in the TextArea. multilinebaseline : bool, default: False Whether the baseline for multiline text is adjusted so that it is (approximately) center-aligned with single-line text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:s arguments arg arg arg arg If Compare Assign Assign Call Call Call Assign Assign Call Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_row_partition_type_tensor_pairs_tail",
    "source_code": "def _get_row_partition_type_tensor_pairs_tail(partition):\n    if partition._has_precomputed_value_rowids():\n        return ('VALUE_ROWIDS', partition.value_rowids())\n    else:\n        return ('ROW_SPLITS', partition.row_splits())",
    "docstring": "Gets a row partition type tensor pair for the tail. If value_rowid is defined, then it is used. Otherwise, row_splits are used. Args: partition: a RowPartition. Returns: A list of (row_partition_type, row_partition_tensor) pairs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_get_row_partition_type_tensor_pairs_tail arg:partition arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "math_to_image",
    "source_code": "def math_to_image(s, filename_or_obj, prop=None, dpi=None, format=None, *, color=None):\n    from matplotlib import figure\n    parser = MathTextParser('path')\n    width, height, depth, _, _ = parser.parse(s, dpi=72, prop=prop)\n    fig = figure.Figure(figsize=(width / 72.0, height / 72.0))\n    fig.text(0, depth / height, s, fontproperties=prop, color=color)\n    fig.savefig(filename_or_obj, dpi=dpi, format=format)\n    return depth",
    "docstring": "Given a math expression, renders it in a closely-clipped bounding box to an image file. Parameters ---------- s : str A math expression. The math portion must be enclosed in dollar signs. filename_or_obj : str or path-like or file-like Where to write the image data. prop : , optional The size and style of the text. dpi : float, optional The output dpi. If not set, the dpi is determined as for . format : str, optional The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not set, the format is determined as for . color : str, optional Foreground color, defaults to :rc:.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\mathtext.py",
    "ast_data": "FunctionDef name:math_to_image arg:s arg:filename_or_obj arg:prop arg:dpi arg:format arguments arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
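A short usage sketch writing the rendered expression to an in-memory PNG:

```python
import io
from matplotlib.mathtext import math_to_image

buf = io.BytesIO()
# Returns the baseline depth of the rendered expression.
depth = math_to_image(r"$\sqrt{x^2 + 1}$", buf, dpi=100, format="png")
print(depth >= 0, buf.getbuffer().nbytes > 0)
```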
  {
    "library": "tensorflow",
    "name": "assert_integer_v2",
    "source_code": "@tf_export('debugging.assert_integer', v1=[])\n@dispatch.add_dispatch_support\ndef assert_integer_v2(x, message=None, name=None):\n    assert_integer(x=x, message=message, name=name)",
    "docstring": "Assert that is of integer dtype. If has a non-integer type, , as well as the dtype of are printed, and is raised. This can always be checked statically, so this method returns nothing. Args: x: A . message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to \"assert_integer\". Raises: TypeError: If is not a non-quantized integer type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:assert_integer_v2 arg:x arg:message arg:name arguments arg arg arg Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_max_squared_sum",
    "source_code": "def get_max_squared_sum(X):\n    return np.sum(X ** 2, axis=1).max()",
    "docstring": "Get the maximum row-wise sum of squares",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_rcv1_logreg_convergence.py",
    "ast_data": "FunctionDef name:get_max_squared_sum arg:X arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "stride_ordered",
    "source_code": "@staticmethod\ndef stride_ordered(sizes, order):\n    assert OrderedSet(range(len(sizes))) == OrderedSet(order)\n    fill_order = stride_order2fill_order(order)\n    return FlexibleLayout.fill_ordered(sizes, fill_order)",
    "docstring": "Create a stride based on the sorted order of a permuted range. In this format, channels last would be: [3, 0, 2, 1]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:stride_ordered arg:sizes arg:order arguments arg arg Compare Call Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "@property\ndef fit_predict(self):\n    raise AttributeError",
    "docstring": "Fit and return the result of each sample's clustering assignment.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arguments arg Raise"
  },
  {
    "library": "scipy",
    "name": "tocsc",
    "source_code": "def tocsc(self, copy=False):\n    if self.ndim != 2:\n        raise ValueError(f'Cannot convert. CSC format must be 2D. Got {self.ndim}D')\n    if self.nnz == 0:\n        return self._csc_container(self.shape, dtype=self.dtype)\n    else:\n        from ._csc import csc_array\n        indptr, indices, data, shape = self._coo_to_compressed(csc_array._swap)\n        x = self._csc_container((data, indices, indptr), shape=shape)\n        if not self.has_canonical_format:\n            x.sum_duplicates()\n        return x",
    "docstring": "Convert this array/matrix to Compressed Sparse Column format Duplicate entries will be summed together. Examples -------- >>> from numpy import array >>> from scipy.sparse import coo_array >>> row = array([0, 0, 1, 3, 1, 0, 0]) >>> col = array([0, 2, 1, 3, 1, 0, 0]) >>> data = array([1, 1, 1, 1, 1, 1, 1]) >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsc() >>> A.toarray() array([[3, 0, 1, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]])",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:tocsc arg:self arg:copy arguments arg arg If Compare Raise Call If Compare Return return:yes Call Assign Call Assign Call If Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "load_csv_data",
    "source_code": "def load_csv_data(data_file_name, *, data_module=DATA_MODULE, descr_file_name=None, descr_module=DESCR_MODULE, encoding='utf-8'):\n    data_path = resources.files(data_module) / data_file_name\n    with data_path.open('r', encoding='utf-8') as csv_file:\n        data_file = csv.reader(csv_file)\n        temp = next(data_file)\n        n_samples = int(temp[0])\n        n_features = int(temp[1])\n        target_names = np.array(temp[2:])\n        data = np.empty((n_samples, n_features))\n        target = np.empty((n_samples,), dtype=int)\n        for i, ir in enumerate(data_file):\n            data[i] = np.asarray(ir[:-1], dtype=np.float64)\n            target[i] = np.asarray(ir[-1], dtype=int)\n    if descr_file_name is None:\n        return (data, target, target_names)\n    else:\n        assert descr_module is not None\n        descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)\n        return (data, target, target_names, descr)",
    "docstring": "Loads from importlib.resourcesdata_module/data_file_name'wine_data.csv''sklearn.datasets.data'descr_module/descr_file_name'wine_data.rst'load_descrdescr_file_nameload_descr'sklearn.datasets.descr'datadescr_file_namedescr_file_name` is not None. encoding : str, optional Text encoding of the CSV file. .. versionadded:: 1.4",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:load_csv_data arg:data_file_name arguments arg arg arg arg arg Assign Call With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call For Call Assign Call Assign Call If Compare Return return:yes Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_filepath",
    "source_code": "def write_filepath(filepath, strategy):\n    dirpath = os.path.dirname(filepath)\n    base = os.path.basename(filepath)\n    return os.path.join(write_dirpath(dirpath, strategy), base)",
    "docstring": "Returns the writing file path to be used to save file distributedly. Directory to contain would be created if it doesn't exist. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing filepath that should be used to save file with distribution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_file_utils.py",
    "ast_data": "FunctionDef name:write_filepath arg:filepath arg:strategy arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(device: _device_t=None, /) -> None:\n    device_index = _get_device_index(device, True)\n    torch._C._accelerator_synchronizeDevice(device_index)",
    "docstring": "Wait for all kernels in all streams on the given device to complete. Args: device (:class:, str, int, optional): device for which to synchronize. It must match the current :ref: device type. If not given, use :func: by default. .. note:: This function is a no-op if the current :ref: is not initialized. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> assert torch.accelerator.is_available() \"No available accelerators detected.\" >>> start_event = torch.Event(enable_timing=True) >>> end_event = torch.Event(enable_timing=True) >>> start_event.record() >>> tensor = torch.randn(100, device=torch.accelerator.current_accelerator()) >>> sum = torch.sum(tensor) >>> end_event.record() >>> torch.accelerator.synchronize() >>> elapsed_time_ms = start_event.elapsed_time(end_event)",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:synchronize arguments arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_conv",
    "source_code": "def _matrix_conv(self, m1, m2):\n    n = m1[0].shape.as_list()[0]\n    if n != m2[0].shape.as_list()[0]:\n        raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0].shape={m1[0].shape} and m2[0].shape={m2[0].shape}.')\n    k = len(m1)\n    l = len(m2)\n    result = {}\n    size = k + l - 1\n    for i in range(size):\n        result[i] = array_ops.zeros([n, n], self.dtype)\n        for index in range(min(k, i + 1)):\n            if i - index < l:\n                result[i] += math_ops.matmul(m1[index], m2[i - index])\n    return result",
    "docstring": "Matrix convolution. Args: m1: A dictionary of length k, each element is a n x n matrix. m2: A dictionary of length l, each element is a n x n matrix. Returns: (k + l - 1) dictionary each element is a n x n matrix. Raises: ValueError: Ff the entries of m1 and m2 are of different dimensions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_matrix_conv arg:self arg:m1 arg:m2 arguments arg arg arg Assign Call If Compare Call Raise Call Assign Call Assign Call Assign Assign For Call Assign Call For Call Call If Compare Call Return return:yes"
  },
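A numpy sketch of the same convolution, i.e. polynomial multiplication with matrix-valued coefficients (the inputs are illustrative):

```python
import numpy as np

# result[i] = sum over j of m1[j] @ m2[i - j], as in _matrix_conv.
n = 2
m1 = {0: np.eye(n), 1: 2 * np.eye(n)}
m2 = {0: np.eye(n), 1: 3 * np.eye(n)}
size = len(m1) + len(m2) - 1
result = {i: np.zeros((n, n)) for i in range(size)}
for i in range(size):
    for j in range(min(len(m1), i + 1)):
        if i - j < len(m2):
            result[i] += m1[j] @ m2[i - j]
# result[1] -> 5 * I  (1*3 + 2*1)
```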
  {
    "library": "pandas",
    "name": "fillna",
    "source_code": "def fillna(self, value, limit: int | None=None, copy: bool=True) -> Self:\n    if limit is not None:\n        raise ValueError('limit must be None')\n    new_values = np.where(isna(self.sp_values), value, self.sp_values)\n    if self._null_fill_value:\n        new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)\n    else:\n        new_dtype = self.dtype\n    return self._simple_new(new_values, self._sparse_index, new_dtype)",
    "docstring": "Fill missing values with . Parameters ---------- value : scalar limit : int, optional Not supported for SparseArray, must be None. copy: bool, default True Ignored for SparseArray. Returns ------- SparseArray Notes ----- When is specified, the result's ``. Again, this preserves the amount of memory used.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:fillna arg:self arg:value arg:limit arg:copy arguments arg arg arg arg If Compare Raise Call Assign Call Call If Assign Call Assign Return return:yes Call"
  },
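A short usage sketch showing the dtype change described in the Notes:

```python
import numpy as np
import pandas as pd

arr = pd.arrays.SparseArray([1.0, np.nan, 2.0])  # fill_value is NaN
filled = arr.fillna(0.0)
# The null fill_value is replaced by the filled value, so the dtype
# becomes Sparse[float64, 0.0] and memory usage is unchanged.
print(filled.dtype)
```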
  {
    "library": "cryptography",
    "name": "parameter_numbers",
    "source_code": "@abc.abstractmethod\ndef parameter_numbers(self) -> DSAParameterNumbers:\n    pass",
    "docstring": "Returns a DSAParameterNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:parameter_numbers arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, onnx_program: torch.onnx.ONNXProgram) -> None:\n    if onnx_program.exported_program is None:\n        raise ValueError('The ONNX program does not contain an exported_program. Please provide an exported_program to verify the ONNX program.')\n    super().__init__(onnx_program.exported_program.module())\n    self._onnx_program = onnx_program\n    self._onnx_values = _create_value_mapping(onnx_program.model.graph)\n    self._args: tuple[Any, ...] = ()\n    self.verification_infos: list[VerificationInfo] = []",
    "docstring": "Initialize the _VerificationInterpreter with an ONNX program. Args: onnx_program: The ONNX program to verify.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_verification.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:onnx_program arguments arg arg If Compare Raise Call Call Call Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_write",
    "source_code": "def _write(self, save_path, options=None):\n    write_start_time = time.time()\n    if not self._initialized:\n        self._ensure_initialized()\n    else:\n        self._queue.join()\n        self._copy_to_cpu()\n    self._check_async_thread_error()\n    context.async_wait()\n    self._save_file_prefix = save_path\n    self._use_checkpoint_save = False\n    self._checkpoint_options = copy.copy(options) if options else None\n    if self._checkpoint_options:\n        self._checkpoint_options.experimental_enable_async_checkpoint = False\n    self._queue.put(True)\n    write_end_time = time.time()\n    metrics.AddCheckpointWriteDuration(api_label=_ASYNC_CHECKPOINT, microseconds=_get_duration_microseconds(write_start_time, write_end_time))\n    return save_path",
    "docstring": "Save the checkpointed variables. This method has exactly the same logic as save(), except it does not increment the underlying save_counter, which is done by the caller, e.g., CheckpointManager. Args: save_path: The file prefix of the checkpoint file. options: Optional CheckpointOption instance. Returns: The full path of the checkpoint file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:_write arg:self arg:save_path arg:options arguments arg arg arg Assign Call If Call Call Call Call Call Assign Assign Assign Call If Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_log_prob",
    "source_code": "@abstractmethod\ndef _estimate_log_prob(self, X):\n    pass",
    "docstring": "Estimate the log-probabilities log P(X | Z). Compute the log-probabilities per each component for each sample. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- log_prob : array, shape (n_samples, n_component)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_estimate_log_prob arg:self arg:X arguments arg arg"
  },
  {
    "library": "kornia",
    "name": "sample",
    "source_code": "def sample(self, sample_size: int, pop_size: int, batch_size: int, device: Optional[Device]=None) -> Tensor:\n    if device is None:\n        device = torch.device('cpu')\n    rand = torch.rand(batch_size, pop_size, device=device)\n    _, out = rand.topk(k=sample_size, dim=1)\n    return out",
    "docstring": "Minimal sampler, but unlike traditional RANSAC we sample in batches. Yields the benefit of the parallel processing, esp. on GPU.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\ransac.py",
    "ast_data": "FunctionDef name:sample arg:self arg:sample_size arg:pop_size arg:batch_size arg:device arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
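A standalone sketch of the batched sampling above (the sizes are illustrative):

```python
import torch

# Draw `sample_size` distinct indices out of `pop_size` for each of
# `batch_size` hypotheses, via topk over uniform random scores.
sample_size, pop_size, batch_size = 3, 10, 4
rand = torch.rand(batch_size, pop_size)
_, idx = rand.topk(k=sample_size, dim=1)
assert idx.shape == (batch_size, sample_size)
```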
  {
    "library": "pygame",
    "name": "_draw_line",
    "source_code": "def _draw_line(surf, color, start, end):\n    if start.x == end.x:\n        raise ValueError\n    slope = abs((end.y - start.y) / (end.x - start.x))\n    error = 0.0\n    if slope < 1:\n        if end.x < start.x:\n            start.x, end.x = (end.x, start.x)\n            start.y, end.y = (end.y, start.y)\n        line_y = start.y\n        dy_sign = 1 if start.y < end.y else -1\n        for line_x in range(start.x, end.x + 1):\n            set_at(surf, line_x, line_y, color)\n            error += slope\n            if error >= 0.5:\n                line_y += dy_sign\n                error -= 1\n    else:\n        if start.y > end.y:\n            start.x, end.x = (end.x, start.x)\n            start.y, end.y = (end.y, start.y)\n        line_x = start.x\n        slope = 1 / slope\n        dx_sign = 1 if start.x < end.x else -1\n        for line_y in range(start.y, end.y + 1):\n            set_at(surf, line_x, line_y, color)\n            error += slope\n            if error >= 0.5:\n                line_x += dx_sign\n                error -= 1",
    "docstring": "draw a non-horizontal line (without anti-aliasing).",
    "type": "function",
    "file_path": "pygame\\src_py\\draw_py.py",
    "ast_data": "FunctionDef name:_draw_line arg:surf arg:color arg:start arg:end arguments arg arg arg arg If Compare Raise Assign Call Assign If Compare If Compare Assign Assign Assign Assign Compare For Call Call If Compare If Compare Assign Assign Assign Assign Assign Compare For Call Call If Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_output_shape_at",
    "source_code": "def get_output_shape_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape')",
    "docstring": "Retrieves the output shape(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A shape tuple (or list of shape tuples if the layer has multiple outputs). Raises: RuntimeError: If called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:get_output_shape_at arg:self arg:node_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_select_enter_pivot",
    "source_code": "def _select_enter_pivot(c_hat, bl, a, rule='bland', tol=1e-12):\n    if rule.lower() == 'mrc':\n        return a[~bl][np.argmin(c_hat)]\n    else:\n        return a[~bl][c_hat < -tol][0]",
    "docstring": "Selects a pivot to enter the basis. Currently Bland's rule - the smallest index that has a negative reduced cost - is the default.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_rs.py",
    "ast_data": "FunctionDef name:_select_enter_pivot arg:c_hat arg:bl arg:a arg:rule arg:tol arguments arg arg arg arg arg If Compare Call Return return:yes Call Return return:yes Compare"
  },
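A numpy illustration of the default Bland's-rule branch above (the arrays are illustrative):

```python
import numpy as np

a = np.arange(5)                                  # column indices
bl = np.array([True, False, True, False, False])  # basic-column mask
c_hat = np.array([-0.5, 0.2, -1.0])               # reduced costs, nonbasic cols
tol = 1e-12
# Among nonbasic columns with reduced cost below -tol, pick the smallest index.
enter = a[~bl][c_hat < -tol][0]
# nonbasic columns are [1, 3, 4]; eligible ones [1, 4] -> picks 1
```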
  {
    "library": "virtualenv",
    "name": "setup_ignore_vcs",
    "source_code": "def setup_ignore_vcs(self):\n    git_ignore = self.dest / '.gitignore'\n    if not git_ignore.exists():\n        git_ignore.write_text('# created by virtualenv automatically\\n*\\n', encoding='utf-8')",
    "docstring": "Generate ignore instructions for version control systems.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:setup_ignore_vcs arg:self arguments arg Assign If Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus, callback, frequency=60, name=None):\n    SimplePlugin.__init__(self, bus)\n    self.callback = callback\n    self.frequency = frequency\n    self.thread = None\n    self.name = name",
    "docstring": "Initialize the monitor plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:callback arg:frequency arg:name arguments arg arg arg arg arg Call Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_validate_how",
    "source_code": "@final\ndef _validate_how(self, how: JoinHow | Literal['left_anti', 'right_anti', 'asof']) -> tuple[JoinHow | Literal['asof'], bool]:\n    merge_type = {'left', 'right', 'inner', 'outer', 'left_anti', 'right_anti', 'cross', 'asof'}\n    if how not in merge_type:\n        raise ValueError(f\"'{how}' is not a valid Merge type: left, right, inner, outer, left_anti, right_anti, cross, asof\")\n    anti_join = False\n    if how in {'left_anti', 'right_anti'}:\n        how = how.split('_')[0]\n        anti_join = True\n    how = cast(JoinHow | Literal['asof'], how)\n    return (how, anti_join)",
    "docstring": "Validate the 'how' parameter and return the actual join type and whether this is an anti join.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_validate_how arg:self arg:how arguments arg arg Assign If Compare Raise Call Assign If Compare Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "config",
    "source_code": "@property\ndef config(self):\n    return self._config",
    "docstring": "Obtain the CLIConfig of this instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:config arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_from_checkpoint",
    "source_code": "@classmethod\n@abc.abstractmethod\ndef create_from_checkpoint(cls, path: str):\n    pass",
    "docstring": "Create factory to create an Adapter from checkpoint. Args: path: Path to checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:create_from_checkpoint arg:cls arg:path arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "mode",
    "source_code": "@abstractmethod\ndef mode(self, *, method):\n    raise NotImplementedError()",
    "docstring": "Mode (most likely value) Informally, the mode is a value that a random variable has the highest probability (density) of assuming. That is, the mode is the element of the support :math: that maximizes the probability density (or mass, for discrete random variables) function :math:: .. math:: \\text{mode} = \\arg\\max_{x \\in \\chi} f(x) Parameters ---------- method : {None, 'formula', 'optimization'} The strategy used to evaluate the mode. By default (`methodmethodmode`: >>> class BetterUniform(stats.Uniform): ... def mode(self): ... return self.b >>> X = BetterUniform(a=0., b=1.) >>> X.mode() 1.0",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:mode arg:self arguments arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "log",
    "source_code": "def log(self) -> Tensor:\n    theta = batched_dot_product(self.q.vec, self.q.vec).sqrt()\n    omega = where(theta[..., None] != 0, 2 * self.q.real[..., None].acos() * self.q.vec / theta[..., None], 2 * self.q.vec / self.q.real[..., None])\n    return omega",
    "docstring": "Convert elements of lie group to elements of lie algebra. Example: >>> data = torch.ones((2, 4)) >>> q = Quaternion(data) >>> So3(q).log() tensor([[0., 0., 0.], [0., 0., 0.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:log arg:self arguments arg Assign Call Call Assign Call Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_composite_shape",
    "source_code": "def get_composite_shape(tensor):\n    if isinstance(tensor, sparse_tensor.SparseTensorValue):\n        return tensor.dense_shape\n    else:\n        return tensor.shape",
    "docstring": "Returns the shape of the passed composite tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_composite_shape arg:tensor arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_append",
    "source_code": "def list_append(list_, x):\n    if isinstance(list_, tensor_array_ops.TensorArray):\n        return _tf_tensorarray_append(list_, x)\n    elif tensor_util.is_tf_type(list_):\n        if list_.dtype == dtypes.variant:\n            return _tf_tensor_list_append(list_, x)\n        else:\n            raise ValueError('tensor lists are expected to be Tensors with dtype=tf.variant, instead found %s' % list_)\n    else:\n        return _py_list_append(list_, x)",
    "docstring": "The list append function. Note: it is unspecified where list_ will be mutated or not. If list_ is a TensorFlow entity, it will not be typically mutated. If list_ is a plain list, it will be. In general, if the list is mutated then the return value should point to the original entity. Args: list_: An entity that supports append semantics. x: The element to append. Returns: Same as list_, after the append was performed. Raises: ValueError: if list_ is not of a known list-like type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:list_append arg:list_ arg:x arguments arg arg If Call Return return:yes Call If Call If Compare Return return:yes Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_input",
    "source_code": "def add_input(self, *args, **kwargs):\n    return self._inputs.add(*args, **kwargs)",
    "docstring": "Add a wrapped input argument to the hint. Args: *args: The input tensor. **kwargs: \"name\" label \"tag\" a tag to group multiple arguments that will be aggregated. I.e. a string like 'cool_input'. Basically multiple inputs can be added to the same hint for parallel operations that will eventually be combined. An example would be static_rnn which creates multiple copies of state or inputs. \"aggregate\" aggregation strategy that is valid only for tag non None. Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST, and OpHint.AGGREGATE_STACK. \"index_override\" The global index to use. This corresponds to the argument order in the final stub that will be generated. Returns: The wrapped input tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:add_input arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_path_in_displaycoord",
    "source_code": "def _get_path_in_displaycoord(self):\n    dpi_cor = self._dpi_cor\n    if self._posA_posB is not None:\n        posA = self._convert_xy_units(self._posA_posB[0])\n        posB = self._convert_xy_units(self._posA_posB[1])\n        posA, posB = self.get_transform().transform((posA, posB))\n        _path = self.get_connectionstyle()(posA, posB, patchA=self.patchA, patchB=self.patchB, shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor)\n    else:\n        _path = self.get_transform().transform_path(self._path_original)\n    _path, fillable = self.get_arrowstyle()(_path, self.get_mutation_scale() * dpi_cor, self.get_linewidth() * dpi_cor, self.get_mutation_aspect())\n    return (_path, fillable)",
    "docstring": "Return the mutated path of the arrow in display coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_get_path_in_displaycoord arg:self arguments arg Assign If Compare Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "consumes",
    "source_code": "def consumes(self, method, params):\n    res = set()\n    if self._self_request:\n        res = res | self._self_request.consumes(method=method, params=params)\n    for _, route_mapping in self._route_mappings.items():\n        for caller, callee in route_mapping.mapping:\n            if caller == method:\n                res = res | route_mapping.router.consumes(method=callee, params=params)\n    return res",
    "docstring": "Check whether the given parameters are consumed by the given method. .. versionadded:: 1.4 Parameters ---------- method : str The name of the method to check. params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by the given method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:consumes arg:self arg:method arg:params arguments arg arg arg Assign Call If Assign Call For Call For If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_split",
    "source_code": "def _split(value, splits, axis=0, split_fn=np.split, stack_fn=np.stack):\n    children = split_fn(value, splits[0], axis=axis)\n    if len(splits) > 1:\n        splits = splits[1:]\n        children = [_split(child, splits, axis + 1) for child in children]\n    return stack_fn(children)",
    "docstring": "Split into a sharded nparray/tf tensor based on the number of splits.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\numpy_util.py",
    "ast_data": "FunctionDef name:_split arg:value arg:splits arg:axis arg:split_fn arg:stack_fn arguments arg arg arg arg arg Assign Call If Compare Call Assign Assign Call Return return:yes Call"
  },
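A runnable sketch of the recursive split-and-stack pattern in the `_split` record above, using plain NumPy; the function body is copied from the record so the snippet is self-contained, and the shapes in the comments are illustrative:

```python
import numpy as np

def _split(value, splits, axis=0, split_fn=np.split, stack_fn=np.stack):
    # Split along the current axis, then recurse one axis deeper per child.
    children = split_fn(value, splits[0], axis=axis)
    if len(splits) > 1:
        splits = splits[1:]
        children = [_split(child, splits, axis + 1) for child in children]
    return stack_fn(children)

x = np.arange(16).reshape(4, 4)
sharded = _split(x, [2, 2])   # 2 shards along rows, then 2 along columns
print(sharded.shape)          # (2, 2, 2, 2): [row_shard, col_shard, 2, 2]
```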
  {
    "library": "sphinx",
    "name": "TodoList",
    "source_code": "class TodoList(SphinxDirective):\n    has_content = False\n    required_arguments = 0\n    optional_arguments = 0\n    final_argument_whitespace = False\n    option_spec: ClassVar[OptionSpec] = {}\n\n    def run(self) -> list[Node]:\n        return [todolist('')]",
    "docstring": "A list of all todo entries.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\todo.py",
    "ast_data": "ClassDef name:TodoList Assign Assign Assign Assign FunctionDef name:run arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "rename_parameter",
    "source_code": "def rename_parameter(since, old, new, func=None):\n    decorator = functools.partial(rename_parameter, since, old, new)\n    if func is None:\n        return decorator\n    signature = inspect.signature(func)\n    assert old not in signature.parameters, f'Matplotlib internal error: {old!r} cannot be a parameter for {func.__name__}()'\n    assert new in signature.parameters, f'Matplotlib internal error: {new!r} must be a parameter for {func.__name__}()'\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        if old in kwargs:\n            warn_deprecated(since, message=f'The {old!r} parameter of {func.__name__}() has been renamed {new!r} since Matplotlib {since}; support for the old name will be dropped in %(removal)s.')\n            kwargs[new] = kwargs.pop(old)\n        return func(*args, **kwargs)\n    DECORATORS[wrapper] = decorator\n    return wrapper",
    "docstring": "Decorator indicating that parameter *old* of *func* is renamed to *new*. The actual implementation of *func* should use *new*, not *old*. If *old* is passed to *func*, a DeprecationWarning is emitted, and its value is used, even if *new* is also passed by keyword (this is to simplify pyplot wrapper functions, which always pass *new* explicitly to the Axes method). If *new* is also passed but positionally, a TypeError will be raised by the underlying function during argument binding. Examples -------- :: @_api.rename_parameter(\"3.1\", \"bad_name\", \"good_name\") def func(good_name): ...",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "FunctionDef name:rename_parameter arg:since arg:old arg:new arg:func arguments arg arg arg arg Assign Call If Compare Return return:yes Assign Call Compare Compare FunctionDef name:wrapper arguments arg arg If Compare Call Assign Call Return return:yes Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_save_and_write_assets",
    "source_code": "def _save_and_write_assets(self, assets_collection_to_add=None):\n    asset_filename_map = _maybe_save_assets(_add_asset_to_collection, assets_collection_to_add)\n    if not asset_filename_map:\n        tf_logging.info('No assets to write.')\n        return\n    copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)",
    "docstring": "Saves asset to the meta graph and writes asset files to disk. Args: assets_collection_to_add: The collection where the asset paths are setup.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_save_and_write_assets arg:self arg:assets_collection_to_add arguments arg arg Assign Call If Call Return return:no Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    X = validate_data(self, X, dtype=DTYPE, order='C', accept_sparse='csr', reset=False)\n    raw_predictions = self._raw_predict(X)\n    if raw_predictions.shape[1] == 1:\n        return raw_predictions.ravel()\n    return raw_predictions",
    "docstring": "Compute the decision function of `classes_`. Regression and binary classification produce an array of shape (n_samples,).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call Assign Call If Compare Return return:yes Call Return return:yes"
  },
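A hedged usage sketch for `decision_function` on a fitted gradient-boosting classifier; the synthetic dataset and hyperparameters here are arbitrary, chosen only to show the binary-case output shape:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = GradientBoostingClassifier(n_estimators=10, random_state=0).fit(X, y)

scores = clf.decision_function(X)
print(scores.shape)  # (100,): binary classification is ravel()-ed to 1-D
```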
  {
    "library": "sphinx",
    "name": "write_svg_depth",
    "source_code": "def write_svg_depth(filename: Path, depth: int) -> None:\n    with open(filename, 'a', encoding='utf-8') as f:\n        f.write('\\n<!-- DEPTH=%s -->' % depth)",
    "docstring": "Write the depth to SVG file as a comment at end of file",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\imgmath.py",
    "ast_data": "FunctionDef name:write_svg_depth arg:filename arg:depth arguments arg arg With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_run_metadata",
    "source_code": "def enable_run_metadata():\n    context().enable_run_metadata()",
    "docstring": "Enables tracing of op execution via RunMetadata. To retrieve the accumulated metadata call context.export_run_metadata() and to stop tracing call context.disable_run_metadata().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:enable_run_metadata arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "exit_after",
    "source_code": "def exit_after(s):\n\n    def outer(fn):\n\n        def inner(*args, **kwargs):\n            signal.signal(signal.SIGALRM, alarm_handler)\n            signal.alarm(s)\n            try:\n                result = fn(*args, **kwargs)\n            finally:\n                signal.alarm(0)\n            return result\n        return inner\n    return outer",
    "docstring": "Decorator to raise TimeoutException if the fn is taking more than s seconds to run.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:exit_after arg:s arguments arg FunctionDef name:outer arg:fn arguments arg FunctionDef name:inner arguments arg arg Call Call Try Assign Call Call Return return:yes Return return:yes Return return:yes"
  },
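A minimal sketch of using the `exit_after` decorator above. It assumes a POSIX platform (`signal.SIGALRM` is unavailable on Windows); `TimeoutException` and `alarm_handler` are free names in the record, so plausible definitions are supplied here:

```python
import signal
import time

class TimeoutException(Exception):
    pass

def alarm_handler(signum, frame):
    raise TimeoutException("function timed out")

def exit_after(s):
    def outer(fn):
        def inner(*args, **kwargs):
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(s)              # schedule SIGALRM in s seconds
            try:
                result = fn(*args, **kwargs)
            finally:
                signal.alarm(0)          # always cancel the pending alarm
            return result
        return inner
    return outer

@exit_after(1)
def slow():
    time.sleep(2)

try:
    slow()
except TimeoutException as e:
    print(e)  # raised after ~1 second
```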
  {
    "library": "scikit-learn",
    "name": "_is_empty_column_selection",
    "source_code": "def _is_empty_column_selection(column):\n    if hasattr(column, 'dtype') and np.issubdtype(column.dtype, np.bool_):\n        return not column.any()\n    elif hasattr(column, '__len__'):\n        return len(column) == 0 or (all((isinstance(col, bool) for col in column)) and (not any(column)))\n    else:\n        return False",
    "docstring": "Return True if the column selection is empty (empty list or all-False boolean array).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_is_empty_column_selection arg:column arguments arg If BoolOp Call Call Return return:yes Call If Call Return return:yes BoolOp Compare Call BoolOp Call Call Call Return return:yes"
  },
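Quick checks exercising the three branches of `_is_empty_column_selection` (all-False boolean mask, empty or all-boolean list, fallback); the function body is copied from the record so the snippet runs standalone:

```python
import numpy as np

def _is_empty_column_selection(column):
    if hasattr(column, 'dtype') and np.issubdtype(column.dtype, np.bool_):
        return not column.any()
    elif hasattr(column, '__len__'):
        return len(column) == 0 or (all(isinstance(col, bool) for col in column)
                                    and not any(column))
    else:
        return False

print(_is_empty_column_selection(np.array([False, False])))  # True: all-False mask
print(_is_empty_column_selection([]))                        # True: empty list
print(_is_empty_column_selection(["age", "height"]))         # False: named columns
print(_is_empty_column_selection(slice(0, 0)))               # False: fallback branch
```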
  {
    "library": "tensorflow",
    "name": "Context",
    "source_code": "class Context(object):\n\n    def __init__(self, info, namer, user_context):\n        self.info = info\n        self.namer = namer\n        self.current_origin = None\n        self.user = user_context",
    "docstring": "Contains information about a source code transformation. This object is mutable, and is updated during conversion. Not thread safe. Attributes: info: EntityInfo, immutable. namer: naming.Namer. current_origin: origin_info.OriginInfo, holds the OriginInfo of the last AST node to be processed successfully. Useful for error handling. user: An user-supplied context object. The object is opaque to the infrastructure, but will pe passed through to all custom transformations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transformer.py",
    "ast_data": "ClassDef name:Context FunctionDef name:__init__ arg:self arg:info arg:namer arg:user_context arguments arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_get_resampler",
    "source_code": "def _get_resampler(self, obj: NDFrame) -> Resampler:\n    _, ax, _ = self._set_grouper(obj, gpr_index=None)\n    if isinstance(ax, DatetimeIndex):\n        return DatetimeIndexResampler(obj, timegrouper=self, group_keys=self.group_keys, gpr_index=ax)\n    elif isinstance(ax, PeriodIndex):\n        if isinstance(ax, PeriodIndex):\n            warnings.warn('Resampling with a PeriodIndex is deprecated. Cast index to DatetimeIndex before resampling instead.', FutureWarning, stacklevel=find_stack_level())\n        return PeriodIndexResampler(obj, timegrouper=self, group_keys=self.group_keys, gpr_index=ax)\n    elif isinstance(ax, TimedeltaIndex):\n        return TimedeltaIndexResampler(obj, timegrouper=self, group_keys=self.group_keys, gpr_index=ax)\n    raise TypeError(f\"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, but got an instance of '{type(ax).__name__}'\")",
    "docstring": "Return my resampler or raise if we have an invalid axis. Parameters ---------- obj : Series or DataFrame Returns ------- Resampler Raises ------ TypeError if incompatible axis",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_get_resampler arg:self arg:obj arguments arg arg Assign Call If Call Return return:yes Call If Call If Call Call Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "cov",
    "source_code": "def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):\n    if ddof is not None and ddof != int(ddof):\n        raise ValueError('ddof must be an integer')\n    if ddof is None:\n        if bias:\n            ddof = 0\n        else:\n            ddof = 1\n    x, xnotmask, rowvar = _covhelper(x, y, rowvar, allow_masked)\n    if not rowvar:\n        fact = np.dot(xnotmask.T, xnotmask) - ddof\n        mask = np.less_equal(fact, 0, dtype=bool)\n        with np.errstate(divide='ignore', invalid='ignore'):\n            data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact\n        result = ma.array(data, mask=mask).squeeze()\n    else:\n        fact = np.dot(xnotmask, xnotmask.T) - ddof\n        mask = np.less_equal(fact, 0, dtype=bool)\n        with np.errstate(divide='ignore', invalid='ignore'):\n            data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact\n        result = ma.array(data, mask=mask).squeeze()\n    return result",
    "docstring": "Estimate the covariance matrix. Except for the handling of missing data this function does the same as . For more details and examples, see . By default, masked values are recognized as such. If and have the same shape, a common mask is allocated: if `allow_maskedxrowvaryxrowvarbiasxyValueErrorallow_masked` is False. See Also -------- numpy.cov Examples -------- >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) >>> np.ma.cov(x, y) masked_array( data=[[--, --, --, --], [--, --, --, --], [--, --, --, --], [--, --, --, --]], mask=[[ True, True, True, True], [ True, True, True, True], [ True, True, True, True], [ True, True, True, True]], fill_value=1e+20, dtype=float64)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:cov arg:x arg:y arg:rowvar arg:bias arg:allow_masked arg:ddof arguments arg arg arg arg arg arg If BoolOp Compare Compare Call Raise Call If Compare If Assign Assign Assign Call If Assign Call Assign Call With Call Assign Call Call Call Call Assign Call Call Assign Call Assign Call With Call Assign Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pop",
    "source_code": "def pop(self, key: str) -> Any:\n    v = self[key]\n    del self[key]\n    return v",
    "docstring": "Remove key from the ParameterDict and return its parameter. Args: key (str): key to pop from the ParameterDict",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:pop arg:self arg:key arguments arg arg Assign Return return:yes"
  },
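A short usage sketch of `ParameterDict.pop`: the key is removed and the stored `nn.Parameter` is returned, mirroring `dict.pop` semantics:

```python
import torch
from torch import nn

pd = nn.ParameterDict({"w": nn.Parameter(torch.ones(2))})
w = pd.pop("w")                              # removes "w", returns the Parameter
print(isinstance(w, nn.Parameter), len(pd))  # True 0
```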
  {
    "library": "tensorflow",
    "name": "_get_key_for_call_stats",
    "source_code": "def _get_key_for_call_stats(self):\n    target_function = self._python_function\n    while hasattr(target_function, '__wrapped__'):\n        target_function = target_function.__wrapped__\n    if hasattr(target_function, '__func__'):\n        target_function = target_function.__func__\n    if hasattr(target_function, '__code__'):\n        return target_function.__code__\n    return self._python_function",
    "docstring": "Returns key instance to track call stats and retracings. The key instance a best-effort to preserve global consistency.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_get_key_for_call_stats arg:self arguments arg Assign While Call Assign If Call Assign If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    assert type_before_parametrizations(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    assert mod.qconfig, 'Input float module must have a valid qconfig'\n    if type_before_parametrizations(mod) == LinearReLU:\n        mod = mod[0]\n    qconfig = mod.qconfig\n    qat_linear = cls(mod.in_features, mod.out_features, bias=mod.bias is not None, qconfig=qconfig)\n    if is_parametrized(mod, 'weight'):\n        transfer_parametrizations_and_params(mod, qat_linear, 'weight')\n    else:\n        qat_linear.weight = mod.weight\n    if is_parametrized(mod, 'bias'):\n        transfer_parametrizations_and_params(mod, qat_linear, 'bias')\n    else:\n        qat_linear.bias = mod.bias\n    return qat_linear",
    "docstring": "Create a qat module from a float module or qparams_dict Args: a float module, either produced by torch.ao.quantization utilities or directly from user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\linear.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call If Compare Call Assign Assign Assign Call Compare If Call Call Assign If Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_tfdbg_logo",
    "source_code": "def get_tfdbg_logo():\n    lines = ['', 'TTTTTT FFFF DDD  BBBB   GGG ', '  TT   F    D  D B   B G    ', '  TT   FFF  D  D BBBB  G  GG', '  TT   F    D  D B   B G   G', '  TT   F    DDD  BBBB   GGG ', '']\n    return debugger_cli_common.RichTextLines(lines)",
    "docstring": "Make an ASCII representation of the tfdbg logo.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:get_tfdbg_logo arguments Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "median",
    "source_code": "@final\ndef median(self, numeric_only: bool=False):\n    return self._downsample('median', numeric_only=numeric_only)",
    "docstring": "Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. .. versionchanged:: 2.0.0 numeric_only no longer accepts `` and defaults to False. Returns ------- Series or DataFrame Median of values within each group. See Also -------- Series.groupby : Apply a function groupby to a Series. DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. Examples -------- >>> ser = pd.Series( ... [1, 2, 3, 3, 4, 5], ... index=pd.DatetimeIndex( ... [ ... \"2023-01-01\", ... \"2023-01-10\", ... \"2023-01-15\", ... \"2023-02-01\", ... \"2023-02-10\", ... \"2023-02-15\", ... ] ... ), ... ) >>> ser.resample(\"MS\").median() 2023-01-01 2.0 2023-02-01 4.0 Freq: MS, dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:median arg:self arg:numeric_only arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "unproject_points_z1",
    "source_code": "def unproject_points_z1(points_in_cam_canonical: Tensor, extension: Optional[Tensor]=None) -> Tensor:\n    KORNIA_CHECK_SHAPE(points_in_cam_canonical, ['*', '2'])\n    if extension is None:\n        extension = ops.ones(points_in_cam_canonical.shape[:-1] + (1,), device=points_in_cam_canonical.device, dtype=points_in_cam_canonical.dtype)\n    elif extension.shape[0] > 1:\n        extension = extension[..., None]\n    return ops.concatenate([points_in_cam_canonical * extension, extension], dim=-1)",
    "docstring": "Unproject one or more points from the canonical z=1 plane into the camera frame. .. math:: \\begin{bmatrix} x \\\\ y \\\\ z \\end{bmatrix} = \\begin{bmatrix} u \\\\ v \\end{bmatrix} \\cdot w Args: points_in_cam_canonical: Tensor representing the points to unproject with shape (..., 2). extension: Tensor representing the extension (depth) of the points to unproject with shape (..., 1). Returns: Tensor representing the unprojected points with shape (..., 3). Example: >>> points = torch.tensor([1., 2.]) >>> extension = torch.tensor([3.]) >>> unproject_points_z1(points, extension) tensor([3., 6., 3.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\projection_z1.py",
    "ast_data": "FunctionDef name:unproject_points_z1 arg:points_in_cam_canonical arg:extension arguments arg arg Call If Compare Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_ixs",
    "source_code": "def _ixs(self, i: int, axis: AxisInt=0) -> Any:\n    return self._values[i]",
    "docstring": "Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:_ixs arg:self arg:i arg:axis arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_variable",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef add_variable(self, *args, **kwargs):\n    warnings.warn('`layer.add_variable` is deprecated and will be removed in a future version. Please use `layer.add_weight` method instead.')\n    return self.add_weight(*args, **kwargs)",
    "docstring": "Deprecated, do NOT use! Alias for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:add_variable arg:self arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "string_to_number",
    "source_code": "@tf_export('strings.to_number', v1=[])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef string_to_number(input, out_type=dtypes.float32, name=None):\n    return gen_parsing_ops.string_to_number(input, out_type, name)",
    "docstring": "Converts each string in the input Tensor to the specified numeric type. (Note that int32 overflow results in an error while float overflow results in a rounded value.) Examples: >>> tf.strings.to_number(\"1.55\") >>> tf.strings.to_number(\"3\", tf.int32) Args: input: A of type . out_type: An optional from: . Defaults to . The numeric type to interpret each string in as. name: A name for the operation (optional). Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:string_to_number arg:input arg:out_type arg:name arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "YieldValueOp",
    "source_code": "class YieldValueOp(Exception):\n    pass",
    "docstring": "Signal to the symbolic tracer to stop and return control flow to the caller",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "ClassDef name:YieldValueOp"
  },
  {
    "library": "scipy",
    "name": "ward",
    "source_code": "@lazy_cython\ndef ward(y):\n    return linkage(y, method='ward', metric='euclidean')",
    "docstring": "Perform Ward's linkage on a condensed distance matrix. See for more information on the return structure and algorithm. The following are common calling conventions: 1. `linkagescipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:ward arg:y arguments arg Return return:yes Call"
  },
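A hedged usage sketch for `ward`: condensed pairwise distances in, linkage matrix out, with `fcluster` used to cut the tree (the data values here are arbitrary):

```python
import numpy as np
from scipy.cluster.hierarchy import fcluster, ward
from scipy.spatial.distance import pdist

X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
Z = ward(pdist(X))                               # (n-1) x 4 linkage matrix
labels = fcluster(Z, t=2, criterion="maxclust")  # cut into two flat clusters
print(labels)                                    # e.g. [1 1 2 2]
```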
  {
    "library": "tensorflow",
    "name": "convert_nested_time_distributed",
    "source_code": "def convert_nested_time_distributed(weights):\n    return preprocess_weights_for_loading(layer.layer, weights, original_keras_version, original_backend)",
    "docstring": "Converts layers nested in wrapper. This function uses for converting nested layers. Args: weights: List of weights values (Numpy arrays). Returns: A list of weights values (Numpy arrays).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:convert_nested_time_distributed arg:weights arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "rendezvous",
    "source_code": "def rendezvous(tensor: torch.Tensor, group: Union[str, 'ProcessGroup']) -> _SymmetricMemory:\n    from torch._C._distributed_c10d import ProcessGroup\n    if isinstance(group, str):\n        group_name = group\n    elif isinstance(group, ProcessGroup):\n        group_name = group.group_name\n    else:\n        raise TypeError(f'rendezvous: unsupported group type: {type(group)}')\n    enable_symm_mem_for_group(group_name)\n    return _SymmetricMemory.rendezvous(tensor, group_name)",
    "docstring": "rendezvous(tensor, group) -> _SymmetricMemory Establish a symmetric memory tensor among participating processes. This is a collective operation. Args: tensor (:class:): the local tensor used to establish the symmetric memory tensor. It must be allocated via :func:. The shape, dtype, and device type must be identical across all participating processes. group (Union[str, :class:]): The group identifying the participating processes. This can be either a group name or a process group object.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:rendezvous arg:tensor arg:group arguments arg arg If Call Assign If Call Assign Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "handle_label",
    "source_code": "def handle_label(self, label, **options):\n    raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')",
    "docstring": "Perform the command's actions for ``, which will be the string as given on the command line.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:handle_label arg:self arg:label arguments arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "pin_min_versions_to_ci_deps",
    "source_code": "def pin_min_versions_to_ci_deps() -> int:\n    all_yaml_files = list(YAML_PATH.iterdir())\n    all_yaml_files.append(ENV_PATH)\n    toml_dependencies = {}\n    with open(SETUP_PATH, 'rb') as toml_f:\n        toml_dependencies = tomllib.load(toml_f)\n    ret = 0\n    for curr_file in all_yaml_files:\n        with open(curr_file, encoding='utf-8') as yaml_f:\n            yaml_start_data = yaml_f.read()\n        yaml_file = yaml.safe_load(yaml_start_data)\n        yaml_dependencies = yaml_file['dependencies']\n        yaml_map = get_yaml_map_from(yaml_dependencies)\n        toml_map = get_toml_map_from(toml_dependencies)\n        yaml_result_data = pin_min_versions_to_yaml_file(yaml_map, toml_map, yaml_start_data)\n        if yaml_result_data != yaml_start_data:\n            with open(curr_file, 'w', encoding='utf-8') as f:\n                f.write(yaml_result_data)\n            ret |= 1\n    return ret",
    "docstring": "Pin minimum versions to CI dependencies. Pip dependencies are not pinned.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_min_versions_in_sync.py",
    "ast_data": "FunctionDef name:pin_min_versions_to_ci_deps arguments Assign Call Call Call Assign With Call Assign Call Assign For With Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call If Compare With Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TritonBundle",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass TritonBundle:\n    kernel_artifacts: list[TritonKernelArtifacts]\n    static_autotuners: list[StaticallyLaunchedAutotuner]",
    "docstring": "Serializable bundle to save into FXGraphCache",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\triton_bundler.py",
    "ast_data": "ClassDef name:TritonBundle Call"
  },
  {
    "library": "cherrypy",
    "name": "on_login",
    "source_code": "def on_login(self, username):\n    pass",
    "docstring": "Process a successful login event. :param username: The logged in user name. :type username: str",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:on_login arg:self arg:username arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    float_modules = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.ao.nn.intrinsic.modules.fused.LinearReLU, torch.ao.nn.qat.dynamic.Linear]\n    assert type(mod) in float_modules, 'nn.quantized.dynamic.Linear.from_float only works for one of' + str([float_mod.__name__ for float_mod in float_modules])\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    if type(mod) == nni.LinearReLU:\n        mod = mod[0]\n    if mod.qconfig is not None and mod.qconfig.weight is not None:\n        weight_observer = mod.qconfig.weight()\n    else:\n        from torch.ao.quantization.qconfig import default_dynamic_qconfig\n        weight_observer = default_dynamic_qconfig.weight()\n    dtype = weight_observer.dtype\n    assert dtype in [torch.qint8, torch.float16], f'The only supported dtypes for dynamic quantized linear are qint8 and float16 got: {dtype}'\n    weight_observer(mod.weight)\n    if dtype == torch.qint8:\n        qweight = _quantize_weight(mod.weight.float(), weight_observer)\n    elif dtype == torch.float16:\n        qweight = mod.weight.float()\n    else:\n        raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!')\n    qlinear = cls(mod.in_features, mod.out_features, dtype=dtype)\n    qlinear.set_weight_bias(qweight, mod.bias)\n    return qlinear",
    "docstring": "Create a dynamic quantized module from a float module or qparams_dict Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\linear.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Compare Call Call Call If Compare Call Assign If BoolOp Compare Compare Assign Call Assign Call Assign Compare Call If Compare Assign Call Call If Compare Assign Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_nodb_cursor",
    "source_code": "@contextmanager\ndef _nodb_cursor(self):\n    conn = self.__class__({**self.settings_dict, 'NAME': None}, alias=NO_DB_ALIAS)\n    try:\n        with conn.cursor() as cursor:\n            yield cursor\n    finally:\n        conn.close()",
    "docstring": "Return a cursor from an alternative connection to be used when there is no need to access the main database, specifically for test db creation/deletion. This also prevents the production database from being exposed to potential child threads while (or after) the test database is destroyed. Refs #10868, #17786, #16969.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:_nodb_cursor arg:self arguments arg Assign Call Try With Call Call"
  },
  {
    "library": "kornia",
    "name": "t",
    "source_code": "@property\ndef t(self) -> Vector2 | Parameter:\n    return self._translation",
    "docstring": "Return the underlying translation vector of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:t arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_update_axisinfo",
    "source_code": "def _update_axisinfo(self):\n    if self._converter is None:\n        return\n    info = self._converter.axisinfo(self.units, self)\n    if info is None:\n        return\n    if info.majloc is not None and self.major.locator != info.majloc and self.isDefault_majloc:\n        self.set_major_locator(info.majloc)\n        self.isDefault_majloc = True\n    if info.minloc is not None and self.minor.locator != info.minloc and self.isDefault_minloc:\n        self.set_minor_locator(info.minloc)\n        self.isDefault_minloc = True\n    if info.majfmt is not None and self.major.formatter != info.majfmt and self.isDefault_majfmt:\n        self.set_major_formatter(info.majfmt)\n        self.isDefault_majfmt = True\n    if info.minfmt is not None and self.minor.formatter != info.minfmt and self.isDefault_minfmt:\n        self.set_minor_formatter(info.minfmt)\n        self.isDefault_minfmt = True\n    if info.label is not None and self.isDefault_label:\n        self.set_label_text(info.label)\n        self.isDefault_label = True\n    self.set_default_intervals()",
    "docstring": "Check the axis converter for the stored units to see if the axis info needs to be updated.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_update_axisinfo arg:self arguments arg If Compare Return return:no Assign Call If Compare Return return:no If BoolOp Compare Compare Call Assign If BoolOp Compare Compare Call Assign If BoolOp Compare Compare Call Assign If BoolOp Compare Compare Call Assign If BoolOp Compare Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_CTCLossGrad",
    "source_code": "@ops.RegisterGradient('CTCLoss')\ndef _CTCLossGrad(op, grad_loss, _):\n    return _CTCLossGradImpl(op, grad_loss, _)",
    "docstring": "The derivative provided by CTC Loss. Args: op: the CTCLoss op. grad_loss: The backprop for cost. Returns: The CTC Loss gradient.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:_CTCLossGrad arg:op arg:grad_loss arg:_ arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_forward_event_shape_tensor",
    "source_code": "def _forward_event_shape_tensor(self, input_shape):\n    return input_shape",
    "docstring": "Subclass implementation for function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_forward_event_shape_tensor arg:self arg:input_shape arguments arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "_note_term",
    "source_code": "def _note_term(self, term: str, labelid: str, location: Any=None) -> None:\n    self.note_object('term', term, labelid, location)\n    self._terms[term.lower()] = (self.env.docname, labelid)",
    "docstring": "Note a term for cross reference. .. note:: Will be removed soon. internal use only.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:_note_term arg:self arg:term arg:labelid arg:location arguments arg arg arg arg Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "LogisticNormal",
    "source_code": "class LogisticNormal(TransformedDistribution):\n    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n    support = constraints.simplex\n    has_rsample = True\n    base_dist: Independent[Normal]\n\n    def __init__(self, loc: Union[Tensor, float], scale: Union[Tensor, float], validate_args: Optional[bool]=None) -> None:\n        base_dist = Normal(loc, scale, validate_args=validate_args)\n        if not base_dist.batch_shape:\n            base_dist = base_dist.expand([1])\n        super().__init__(base_dist, StickBreakingTransform(), validate_args=validate_args)\n\n    def expand(self, batch_shape, _instance=None):\n        new = self._get_checked_instance(LogisticNormal, _instance)\n        return super().expand(batch_shape, _instance=new)\n\n    @property\n    def loc(self) -> Tensor:\n        return self.base_dist.base_dist.loc\n\n    @property\n    def scale(self) -> Tensor:\n        return self.base_dist.base_dist.scale",
    "docstring": "Creates a logistic-normal distribution parameterized by :attr: and :attr: that define the base distribution transformed with the such that:: X ~ LogisticNormal(loc, scale) Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale) Args: loc (float or Tensor): mean of the base distribution scale (float or Tensor): standard deviation of the base distribution Example:: >>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1) >>> # of the base Normal distribution >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = LogisticNormal(torch.tensor([0.0] * 3), torch.tensor([1.0] * 3)) >>> m.sample() tensor([ 0.7653, 0.0341, 0.0579, 0.1427])",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\logistic_normal.py",
    "ast_data": "ClassDef name:LogisticNormal Assign Assign Assign FunctionDef name:__init__ arg:self arg:loc arg:scale arg:validate_args arguments arg arg arg arg Assign Call If Assign Call Call Call Call FunctionDef name:expand arg:self arg:batch_shape arg:_instance arguments arg arg arg Assign Call Return return:yes Call Call FunctionDef name:loc arg:self arguments arg Return return:yes FunctionDef name:scale arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "limit_range_for_scale",
    "source_code": "def limit_range_for_scale(self, vmin, vmax, minpos):\n    if not np.isfinite(minpos):\n        minpos = 1e-07\n    return (minpos if vmin <= 0 else vmin, 1 - minpos if vmax >= 1 else vmax)",
    "docstring": "Limit the domain to values between 0 and 1 (excluded).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:limit_range_for_scale arg:self arg:vmin arg:vmax arg:minpos arguments arg arg arg arg If Call Assign Return return:yes Compare Compare"
  },
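A numeric sketch of the clamping performed by `limit_range_for_scale` (body copied from the record, minus `self`, so it runs standalone): values at or outside [0, 1] are pulled just inside the open interval using `minpos`, with 1e-7 as the fallback when `minpos` is not finite:

```python
import numpy as np

def limit_range_for_scale(vmin, vmax, minpos):
    if not np.isfinite(minpos):
        minpos = 1e-07  # fallback when no positive data point is known
    return (minpos if vmin <= 0 else vmin,
            1 - minpos if vmax >= 1 else vmax)

print(limit_range_for_scale(0.0, 1.0, 1e-3))    # (0.001, 0.999)
print(limit_range_for_scale(0.2, 0.8, np.inf))  # (0.2, 0.8): already inside
```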
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    input_features = _check_feature_names_in(self, input_features)\n    prefix = self.__class__.__name__.lower()\n    return np.asarray([f'{prefix}_{feature_name}' for feature_name in input_features[self.features_]], dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If is , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_base.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_gencode_flags",
    "source_code": "def get_gencode_flags() -> str:\n    arch_list = get_arch_list()\n    if len(arch_list) == 0:\n        return ''\n    arch_list_ = [arch.split('_') for arch in arch_list]\n    return ' '.join([f'-gencode compute=compute_{arch},code={kind}_{arch}' for kind, arch in arch_list_])",
    "docstring": "Return NVCC gencode flags this library was compiled with.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:get_gencode_flags arguments Assign Call If Compare Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_active",
    "source_code": "def set_active(self, index, state=None):\n    if index not in range(len(self.labels)):\n        raise ValueError(f'Invalid CheckButton index: {index}')\n    _api.check_isinstance((bool, None), state=state)\n    invisible = colors.to_rgba('none')\n    facecolors = self._checks.get_facecolor()\n    if state is None:\n        state = colors.same_color(facecolors[index], invisible)\n    facecolors[index] = self._active_check_colors[index] if state else invisible\n    self._checks.set_facecolor(facecolors)\n    if self.drawon:\n        if self._useblit:\n            if self._background is not None:\n                self.canvas.restore_region(self._background)\n            self.ax.draw_artist(self._checks)\n            self.canvas.blit(self.ax.bbox)\n        else:\n            self.canvas.draw()\n    if self.eventson:\n        self._observers.process('clicked', self.labels[index].get_text())",
    "docstring": "Modify the state of a check button by index. Callbacks will be triggered if :attr: is True. Parameters ---------- index : int Index of the check button to toggle. state : bool, optional If a boolean value, set the state explicitly. If no value is provided, the state is toggled. Raises ------ ValueError If *index* is invalid. TypeError If *state* is not boolean.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_active arg:self arg:index arg:state arguments arg arg arg If Compare Call Call Raise Call Call Assign Call Assign Call If Compare Assign Call Assign Call If If If Compare Call Call Call Call If Call Call"
  },
  {
    "library": "kornia",
    "name": "_torch_histc_cast",
    "source_code": "def _torch_histc_cast(input: Tensor, bins: int, min: int, max: int) -> Tensor:\n    if not isinstance(input, Tensor):\n        raise AssertionError(f'Input must be Tensor. Got: {type(input)}.')\n    dtype: torch.dtype = input.dtype\n    if dtype not in (torch.float32, torch.float64):\n        dtype = torch.float32\n    return torch.histc(input.to(dtype), bins, min, max).to(input.dtype)",
    "docstring": "Make torch.histc work with other than fp32/64. The function torch.histc is only implemented for fp32/64 which makes impossible to be used by fp16 or others. What this function does, is cast input data type to fp32, apply torch.inverse, and cast back to the input dtype.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_torch_histc_cast arg:input arg:bins arg:min arg:max arguments arg arg arg arg If Call Raise Call Call If Compare Assign Return return:yes Call Call Call"
  },
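A sketch of the cast-through-fp32 trick described above: `torch.histc` typically rejects half-precision input on CPU, so the tensor is upcast before the call and the histogram is cast back afterwards:

```python
import torch

x = torch.tensor([0.1, 0.4, 0.6, 0.9], dtype=torch.float16)

# Direct torch.histc(x, ...) would normally raise for fp16 on CPU;
# upcast to fp32, compute, then restore the original dtype.
hist = torch.histc(x.to(torch.float32), bins=2, min=0, max=1).to(x.dtype)
print(hist)  # tensor([2., 2.], dtype=torch.float16)
```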
  {
    "library": "numpy",
    "name": "_replace_return_type",
    "source_code": "def _replace_return_type(self, doc, np_ret, np_ma_ret):\n    if np_ret not in doc:\n        raise RuntimeError(f'Failed to replace `{np_ret}` with `{np_ma_ret}`. The documentation string for return type, {np_ret}, is not found in the docstring for `np.{self._func.__name__}`. Fix the docstring for `np.{self._func.__name__}` or update the expected string for return type.')\n    return doc.replace(np_ret, np_ma_ret)",
    "docstring": "Replace documentation of `` method. (e.g. \"out : MaskedArray\")",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_replace_return_type arg:self arg:doc arg:np_ret arg:np_ma_ret arguments arg arg arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_quantization_config_generator",
    "source_code": "def _quantization_config_generator(self, detector_qconfig_info: DetectorQConfigInfo, module: torch.nn.Module) -> QConfig:\n    return detector_qconfig_info.generate_quantization_qconfig(module)",
    "docstring": "Returns the quantization configuration generated by the DetectorQConfigInfo object",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_quantization_config_generator arg:self arg:detector_qconfig_info arg:module arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "offset",
    "source_code": "def offset(self, node: IRNode) -> str:\n    if node is None:\n        return '0'\n    return str(node.get_layout().offset)",
    "docstring": "Generates code which represents offset of a given node.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:offset arg:self arg:node arguments arg arg If Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "is_transform_set",
    "source_code": "def is_transform_set(self):\n    return self._transformSet",
    "docstring": "Return whether the Artist has an explicitly set transform. This is *True* after has been called.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:is_transform_set arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, filename, dpi):\n    _log.debug('Dvi: %s', filename)\n    self.file = open(filename, 'rb')\n    self.dpi = dpi\n    self.fonts = {}\n    self.state = _dvistate.pre\n    self._missing_font = None",
    "docstring": "Read the data from the file named *filename* and convert TeX's internal units to units of *dpi* per inch. *dpi* only sets the units and does not limit the resolution. Use None to return TeX's internal units.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filename arg:dpi arguments arg arg arg Call Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "batched_linspace",
    "source_code": "def batched_linspace(start: Tensor, end: Tensor, step: int, dim: int) -> Tensor:\n    intervals = ((end - start) / (step - 1)).unsqueeze(dim)\n    broadcast_size = [1] * len(intervals.shape)\n    broadcast_size[dim] = step\n    samples = torch.arange(step, dtype=torch.float, device=start.device).reshape(broadcast_size)\n    samples = start.unsqueeze(dim) + samples * intervals\n    return samples",
    "docstring": "Batch version of torch.normalize (similar to the numpy one).",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2.py",
    "ast_data": "FunctionDef name:batched_linspace arg:start arg:end arg:step arg:dim arguments arg arg arg arg Assign Call Assign Call Assign Assign Call Call Assign Call Return return:yes"
  },
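A runnable sketch of `batched_linspace` (body copied from the record): per-batch start/end values produce `step` evenly spaced samples inserted along `dim`:

```python
import torch

def batched_linspace(start, end, step, dim):
    # Per-batch spacing, with a broadcastable ramp 0..step-1 along `dim`.
    intervals = ((end - start) / (step - 1)).unsqueeze(dim)
    broadcast_size = [1] * len(intervals.shape)
    broadcast_size[dim] = step
    samples = torch.arange(step, dtype=torch.float,
                           device=start.device).reshape(broadcast_size)
    return start.unsqueeze(dim) + samples * intervals

start = torch.tensor([0., 10.])
end = torch.tensor([1., 20.])
print(batched_linspace(start, end, step=3, dim=1))
# tensor([[ 0.0000,  0.5000,  1.0000],
#         [10.0000, 15.0000, 20.0000]])
```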
  {
    "library": "django",
    "name": "_unregister_class_lookup",
    "source_code": "def _unregister_class_lookup(cls, lookup, lookup_name=None):\n    if lookup_name is None:\n        lookup_name = lookup.lookup_name\n    del cls.class_lookups[lookup_name]\n    cls._clear_cached_class_lookups()",
    "docstring": "Remove given lookup from cls lookups. For use in tests only as it's not thread-safe.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:_unregister_class_lookup arg:cls arg:lookup arg:lookup_name arguments arg arg arg If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "mean_absolute_error",
    "source_code": "@dispatch.add_dispatch_support\ndef mean_absolute_error(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    return backend.mean(math_ops.abs(y_pred - y_true), axis=-1)",
    "docstring": "Computes the mean absolute error between labels and predictions. Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Mean absolute error values. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:mean_absolute_error arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_custom_module_class_keys",
    "source_code": "def get_custom_module_class_keys(custom_module_mapping: dict[QuantType, dict[type, type]]) -> list[Any]:\n    float_custom_module_classes: set[Any] = set()\n    for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]:\n        quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {})\n        quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys())\n        float_custom_module_classes |= quant_mode_custom_module_classes\n    return list(float_custom_module_classes)",
    "docstring": "Get all the unique custom module keys in the custom config dict e.g. Input: { QuantType.STATIC: { CustomModule1: ObservedCustomModule }, QuantType.DYNAMIC: { CustomModule2: DynamicObservedCustomModule }, QuantType.WEIGHT_ONLY: { CustomModule3: WeightOnlyObservedCustomModule }, } Output: # extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts [CustomModule1, CustomModule2, CustomModule3]",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:get_custom_module_class_keys arg:custom_module_mapping arguments arg Call For Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "add_stylesheets",
    "source_code": "def add_stylesheets(self, handler):\n    pass",
    "docstring": "Add stylesheet(s) to the feed. Called from write().",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:add_stylesheets arg:self arg:handler arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_solve_eigen_covariance_no_intercept",
    "source_code": "def _solve_eigen_covariance_no_intercept(self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):\n    w = 1 / (eigvals + alpha)\n    A = (V * w).dot(V.T)\n    AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))\n    y_hat = safe_sparse_dot(X, AXy, dense_output=True)\n    hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)\n    if len(y.shape) != 1:\n        hat_diag = hat_diag[:, np.newaxis]\n    return ((1 - hat_diag) / alpha, (y - y_hat) / alpha)",
    "docstring": "Compute dual coefficients and diagonal of G^-1. Used when we have a decomposition of X^T.X (n_samples > n_features and X is sparse), and not fitting an intercept.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_solve_eigen_covariance_no_intercept arg:self arg:alpha arg:y arg:sqrt_sw arg:X_mean arg:eigvals arg:V arg:X arguments arg arg arg arg arg arg arg arg Assign Assign Call Assign Call Call Assign Call Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "print_",
    "source_code": "def print_(*objects, **kwargs):\n    unknown_kwargs = tuple(set(kwargs.keys()) - set(('sep', 'end', 'file', 'flush')))\n    if unknown_kwargs:\n        raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs))\n    print_fn = _py_print\n    for x in objects:\n        print_override = registry_lookup(print_registry, x)\n        if print_override is not None:\n            print_fn = print_override\n            break\n    if print_fn is _py_print:\n        assert not any((tensor_util.is_tf_type(s) for s in objects))\n    return print_fn(*objects, **kwargs)",
    "docstring": "Overload of the print builtin.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\py_builtins.py",
    "ast_data": "FunctionDef name:print_ arguments arg arg Assign Call Call Call Call If Raise Call Call Assign For Assign Call If Compare Assign If Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "size",
    "source_code": "@abc.abstractmethod\ndef size(self) -> int:\n    pass",
    "docstring": "Returns the size of the queue at the time this method is called. Note that by the time `` method is called. That is, the following assertion should hold: size = q.size() res = q.get(size, timeout=0) assert size == len(res) -- or -- size = q.size() res = q.get(size * 2, timeout=1) assert size <= len(res) <= size * 2",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "get_next_as_list",
    "source_code": "def get_next_as_list(self, name=None):\n    del name\n    with ops.device(self._worker):\n        return self._format_data_list_with_options(self._iterator.get_next())",
    "docstring": "Get next element from the underlying iterator. Runs the iterator get_next() within a device scope. Since this doesn't use get_next_as_optional(), it is considerably faster than get_next_as_list(), but it raises EOFError if any of the device doesn't get any data. Args: name: not used. Returns: A list consisting of the next data from each device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:get_next_as_list arg:self arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "render_to_string",
    "source_code": "def render_to_string(template_name, context=None, request=None, using=None):\n    if isinstance(template_name, (list, tuple)):\n        template = select_template(template_name, using=using)\n    else:\n        template = get_template(template_name, using=using)\n    return template.render(context, request)",
    "docstring": "Load a template and render it with a context. Return a string. template_name may be a string or a list of strings.",
    "type": "function",
    "file_path": "django\\django\\template\\loader.py",
    "ast_data": "FunctionDef name:render_to_string arg:template_name arg:context arg:request arg:using arguments arg arg arg arg If Call Assign Call Assign Call Return return:yes Call"
  },
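A hedged usage sketch for `render_to_string`; this assumes a configured Django project (settings loaded, template backends set up), and the template names here are hypothetical:

```python
from django.template.loader import render_to_string

# Single template name.
html = render_to_string("greeting.html", {"name": "Ada"})

# A list of names: the first template that exists wins (via select_template).
html = render_to_string(["special_greeting.html", "greeting.html"],
                        {"name": "Ada"})
```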
  {
    "library": "tensorflow",
    "name": "log_softmax_v2",
    "source_code": "@tf_export('nn.log_softmax', 'math.log_softmax', v1=[])\n@dispatch.add_dispatch_support\ndef log_softmax_v2(logits, axis=None, name=None):\n    if axis is None:\n        axis = -1\n    return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)",
    "docstring": "Computes log softmax activations. For each batch and class we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty . Must be one of the following types: , , . axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). Returns: A . Has the same type as . Same shape as . Raises: InvalidArgumentError: if is empty or is beyond the last dimension of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:log_softmax_v2 arg:logits arg:axis arg:name arguments arg arg arg If Compare Assign Return return:yes Call Call"
  },
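A small check of the identity in the docstring, logsoftmax = logits - log(reduce_sum(exp(logits), axis)), computed both ways:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
out = tf.nn.log_softmax(logits)  # axis defaults to -1

manual = logits - tf.math.log(
    tf.reduce_sum(tf.exp(logits), axis=-1, keepdims=True))
print(tf.reduce_max(tf.abs(out - manual)).numpy())  # ~0.0
```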
  {
    "library": "tensorflow",
    "name": "trace",
    "source_code": "def trace(self, name='trace'):\n    with self._name_scope(name):\n        return self._trace()",
    "docstring": "Trace of the linear operator, equal to sum of . If the operator is square, this is also the sum of the eigenvalues. Args: name: A name for this . Returns: Shape of same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:trace arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "count_nonzero_v2",
    "source_code": "@tf_export('math.count_nonzero', v1=[])\n@dispatch.add_dispatch_support\ndef count_nonzero_v2(input, axis=None, keepdims=None, dtype=dtypes.int64, name=None):\n    if keepdims is None:\n        keepdims = False\n    with ops.name_scope(name, 'count_nonzero', [input]):\n        input = ops.convert_to_tensor(input, name='input')\n        if input.dtype == dtypes.bool:\n            predicate = input\n        else:\n            zero = array_ops.zeros([], dtype=input.dtype)\n            predicate = gen_math_ops.not_equal(input, zero)\n        return cast(reduce_sum(cast(predicate, dtypes.int64), axis=axis, keepdims=keepdims), dtype=dtype)",
    "docstring": "Computes number of nonzero elements across dimensions of a tensor. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. **NOTE** Floating point comparison to zero is done by exact floating point equality check. Small values are **not** rounded to zero for purposes of the nonzero check. For example: **NOTE** Strings are compared against zero-length empty string . Any string with a size greater than zero is already considered as nonzero. For example: Args: input: The tensor to reduce. Should be of numeric type, , or . axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. dtype: The output dtype; defaults to . name: A name for the operation (optional). Returns: The reduced tensor (number of nonzero values).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:count_nonzero_v2 arg:input arg:axis arg:keepdims arg:dtype arg:name arguments arg arg arg arg arg If Compare Assign With Call Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call Call Call Call"
  },
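A sketch of the semantics noted in the docstring: exact zero comparison for numbers, empty-string comparison for strings:

```python
import tensorflow as tf

x = tf.constant([[0.0, 1.5, 0.0],
                 [0.0, 3.0, 4.0]])
print(tf.math.count_nonzero(x).numpy())          # 3
print(tf.math.count_nonzero(x, axis=0).numpy())  # [0 2 1]

s = tf.constant(["", "a", " "])
print(tf.math.count_nonzero(s).numpy())          # 2: " " counts as nonzero
```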
  {
    "library": "pytorch",
    "name": "_strtoul",
    "source_code": "def _strtoul(s: str) -> int:\n    if not s:\n        return -1\n    for idx, c in enumerate(s):\n        if not (c.isdigit() or (idx == 0 and c in '+-')):\n            break\n        if idx + 1 == len(s):\n            idx += 1\n    return int(s[:idx]) if idx > 0 else -1",
    "docstring": "Return -1 or positive integer sequence string starts with.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:_strtoul arg:s arguments arg If Return return:yes For Call If BoolOp Call BoolOp Compare Compare If Compare Call Return return:yes Compare Call"
  },
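  To make the parsing rule concrete, here is a self-contained mirror of the logic above (the name `strtoul` and the sample inputs are illustrative, not part of the library):

  ```python
  def strtoul(s: str) -> int:
      # Scan an optional leading sign followed by digits; convert the
      # scanned prefix, or return -1 when there is no integer prefix.
      if not s:
          return -1
      idx = 0
      for idx, c in enumerate(s):
          if not (c.isdigit() or (idx == 0 and c in "+-")):
              break
          if idx + 1 == len(s):
              idx += 1
      return int(s[:idx]) if idx > 0 else -1

  assert strtoul("2") == 2      # plain number
  assert strtoul("12,3") == 12  # stops at the first non-digit
  assert strtoul("gpu0") == -1  # no leading integer
  ```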
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    id_weight_pair = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager)\n    return self._transform_id_weight_pair(id_weight_pair, self.variable_shape[-1])",
    "docstring": "Returns dense representing feature. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Transformed feature . Raises: ValueError: if input rank is not known at graph building time.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "median",
    "source_code": "def median(self, *args, **kwds):\n    return self.ppf(0.5, *args, **kwds)",
    "docstring": "Median of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter, Default is 0. scale : array_like, optional Scale parameter, Default is 1. Returns ------- median : float The median of the distribution. See Also -------- rv_discrete.ppf Inverse of the CDF",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:median arg:self arguments arg arg arg Return return:yes Call"
  },
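  Since the median is defined as the 50% quantile, `ppf(0.5)` and `median()` agree for any distribution; a quick check with SciPy's distributions:

  ```python
  from scipy import stats

  # For a symmetric distribution the median equals the location parameter.
  print(stats.norm.median(loc=2.0, scale=3.0))    # 2.0
  print(stats.norm.ppf(0.5, loc=2.0, scale=3.0))  # 2.0

  # For a skewed distribution the median differs from the mean.
  print(stats.expon.median(scale=1.0))            # ln(2) ~= 0.693
  ```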
  {
    "library": "scipy",
    "name": "_inv_standard_rvs",
    "source_code": "def _inv_standard_rvs(self, n, shape, dim, df, random_state):\n    A = np.zeros(shape + (dim, dim))\n    tri_rows, tri_cols = np.tril_indices(dim, k=-1)\n    n_tril = dim * (dim - 1) // 2\n    A[..., tri_rows, tri_cols] = random_state.normal(size=(*shape, n_tril))\n    rows = np.arange(dim)\n    chi_dfs = df - dim + 1 + rows\n    A[..., rows, rows] = random_state.chisquare(df=chi_dfs, size=(*shape, dim)) ** 0.5\n    return A",
    "docstring": "Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseedshapeA[..., :, :]invwishart(df, np.eye(dim))`. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_inv_standard_rvs arg:self arg:n arg:shape arg:dim arg:df arg:random_state arguments arg arg arg arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "media_downloaded",
    "source_code": "@abstractmethod\ndef media_downloaded(self, response: Response, request: Request, info: SpiderInfo, *, item: Any=None) -> FileInfo:\n    raise NotImplementedError",
    "docstring": "Handler for success downloads",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pipelines\\media.py",
    "ast_data": "FunctionDef name:media_downloaded arg:self arg:response arg:request arg:info arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "autoclose",
    "source_code": "def autoclose(self):\n    self.close_on_last_child = True\n    if self.child_counter == 0:\n        self.close()",
    "docstring": "Automatically close stream when all child streams are closed or if there are none.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\utils\\common.py",
    "ast_data": "FunctionDef name:autoclose arg:self arguments arg Assign If Compare Call"
  },
  {
    "library": "sphinx",
    "name": "_visit_sig_parameter_list",
    "source_code": "def _visit_sig_parameter_list(self, node: Element, parameter_group: type[Element]) -> None:\n    self.is_first_param = True\n    self.optional_param_level = 0\n    self.params_left_at_level = 0\n    self.param_group_index = 0\n    self.list_is_required_param = [isinstance(c, parameter_group) for c in node.children]\n    self.required_params_left = sum(self.list_is_required_param)\n    self.param_separator = '\\\\sphinxparamcomma '\n    self.multi_line_parameter_list = node.get('multi_line_parameter_list', False)\n    self.trailing_comma = node.get('multi_line_trailing_comma', False)",
    "docstring": "Visit a signature parameters or type parameters list. The *parameter_group* value is the type of a child node acting as a required parameter or as a set of contiguous optional parameters. The caller is responsible for closing adding surrounding LaTeX macro argument start and stop tokens.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:_visit_sig_parameter_list arg:self arg:node arg:parameter_group arguments arg arg arg Assign Assign Assign Assign Assign Call Assign Call Assign Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "Ticker",
    "source_code": "class Ticker:\n\n    def __init__(self):\n        self._locator = None\n        self._formatter = None\n        self._locator_is_default = True\n        self._formatter_is_default = True\n\n    @property\n    def locator(self):\n        return self._locator\n\n    @locator.setter\n    def locator(self, locator):\n        if not isinstance(locator, mticker.Locator):\n            raise TypeError('locator must be a subclass of matplotlib.ticker.Locator')\n        self._locator = locator\n\n    @property\n    def formatter(self):\n        return self._formatter\n\n    @formatter.setter\n    def formatter(self, formatter):\n        if not isinstance(formatter, mticker.Formatter):\n            raise TypeError('formatter must be a subclass of matplotlib.ticker.Formatter')\n        self._formatter = formatter",
    "docstring": "A container for the objects defining tick position and format. Attributes ---------- locator : subclass Determines the positions of the ticks. formatter : subclass Determines the format of the tick labels.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "ClassDef name:Ticker FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign FunctionDef name:locator arg:self arguments arg Return return:yes FunctionDef name:locator arg:self arg:locator arguments arg arg If Call Raise Call Assign FunctionDef name:formatter arg:self arguments arg Return return:yes FunctionDef name:formatter arg:self arg:formatter arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "scipy",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return iter(self._indices)",
    "docstring": "Returns an iterator of the elements in the disjoint set. Elements are ordered by insertion order.",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\_disjoint_set.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_max_pool_grad_flops",
    "source_code": "@ops.RegisterStatistics('MaxPoolGrad', 'flops')\ndef _max_pool_grad_flops(graph, node):\n    _verify_conv_data_format(node)\n    kernel_shape = list(node.attr['ksize'].list.i)\n    kernel_area = _list_product(kernel_shape)\n    orig_out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    orig_out_shape.assert_is_fully_defined()\n    max_pool_ops = kernel_area * orig_out_shape.num_elements()\n    return ops.OpStats('flops', max_pool_ops + orig_out_shape.num_elements())",
    "docstring": "Compute flops for MaxPoolGrad operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_max_pool_grad_flops arg:graph arg:node arguments arg arg Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "IsLoopEnter",
    "source_code": "def IsLoopEnter(op):\n    return op.type == 'Enter' or op.type == 'RefEnter'",
    "docstring": "Returns true if is an Enter.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util.py",
    "ast_data": "FunctionDef name:IsLoopEnter arg:op arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, default, domains=None, use_x_forwarded_host=True):\n    self.default = default\n    self.domains = domains or {}\n    self.use_x_forwarded_host = use_x_forwarded_host",
    "docstring": "Initialize a virtual host app. :param default: The default WSGI application :type default: WSGI application :param use_x_forwarded_host: If True (the default), any \"X-Forwarded-Host\" request header will be used instead of the \"Host\" header. This is commonly added by HTTP servers (such as Apache) when proxying. :type use_x_forwarded_host: Bool, optional :param domains: A dict of {host header value: application} pairs. The incoming \"Host\" request header is looked up in this dict, and, if a match is found, the corresponding WSGI application will be called instead of the default. Note that you often need separate entries for \"example.com\" and \"www.example.com\". In addition, \"Host\" headers may contain the port number. :type domains: Dict, optional",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:default arg:domains arg:use_x_forwarded_host arguments arg arg arg arg Assign Assign BoolOp Assign"
  },
  {
    "library": "cherrypy",
    "name": "umask",
    "source_code": "@property\ndef umask(self):\n    return self._umask",
    "docstring": "The default permission mode for newly created files and directories. Usually expressed in octal format, for example, ``. Availability: Unix, Windows.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:umask arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "hyperparameters",
    "source_code": "@property\ndef hyperparameters(self):\n    r = [getattr(self, attr) for attr in dir(self) if attr.startswith('hyperparameter_')]\n    return r",
    "docstring": "Returns a list of all hyperparameter specifications.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:hyperparameters arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "compute_kkt_optimality",
    "source_code": "def compute_kkt_optimality(g, on_bound):\n    g_kkt = g * on_bound\n    free_set = on_bound == 0\n    g_kkt[free_set] = np.abs(g[free_set])\n    return np.max(g_kkt)",
    "docstring": "Compute the maximum violation of KKT conditions.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_lsq\\bvls.py",
    "ast_data": "FunctionDef name:compute_kkt_optimality arg:g arg:on_bound arguments arg arg Assign Assign Compare Assign Call Return return:yes Call"
  },
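  A worked example of the violation measure above, with the `on_bound` convention assumed to be -1/0/+1 for lower bound / free / upper bound (as in the BVLS solver):

  ```python
  import numpy as np

  def kkt_violation(g, on_bound):
      # g * on_bound > 0 signals a gradient pushing out of the feasible
      # region at an active bound; free variables must have g near zero.
      g_kkt = g * on_bound
      free = on_bound == 0
      g_kkt[free] = np.abs(g[free])
      return np.max(g_kkt)

  g = np.array([0.5, -0.2, 0.1])
  on_bound = np.array([-1, 0, 1])
  print(kkt_violation(g, on_bound))  # max(-0.5, 0.2, 0.1) = 0.2
  ```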
  {
    "library": "django",
    "name": "method_has_no_args",
    "source_code": "def method_has_no_args(meth):\n    count = len([p for p in _get_callable_parameters(meth) if p.kind in ARG_KINDS])\n    return count == 0 if inspect.ismethod(meth) else count == 1",
    "docstring": "Return True if a method only accepts 'self'.",
    "type": "function",
    "file_path": "django\\django\\utils\\inspect.py",
    "ast_data": "FunctionDef name:method_has_no_args arg:meth arguments arg Assign Call Call Compare Return return:yes Call Compare Compare"
  },
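  The same check can be reproduced with the standard `inspect` module; a sketch assuming positional parameter kinds are what the library's `ARG_KINDS` covers (the helper `positional_count` is hypothetical):

  ```python
  import inspect

  def positional_count(func):
      # Bound methods hide `self`, so "no args" means a count of 0 for a
      # bound method and 1 (just `self`) for the plain function.
      kinds = (inspect.Parameter.POSITIONAL_ONLY,
               inspect.Parameter.POSITIONAL_OR_KEYWORD)
      return sum(1 for p in inspect.signature(func).parameters.values()
                 if p.kind in kinds)

  class C:
      def m(self):
          pass

  print(positional_count(C.m))    # 1 -> only `self`
  print(positional_count(C().m))  # 0 -> `self` already bound
  ```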
  {
    "library": "kornia",
    "name": "run",
    "source_code": "def run(self, num_epochs: int=1) -> None:\n    for i_epoch in range(num_epochs):\n        epoch_psnr: float = self._train_one_epoch()\n        if i_epoch % 10 == 0:\n            current_time = datetime.now().strftime('%H:%M:%S')\n            logger.info('Epoch: %d: epoch_psnr = %f; time: %s', i_epoch, epoch_psnr, current_time)",
    "docstring": "Run training epochs. Args: num_epochs: number of epochs to run. Default: 1.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_solver.py",
    "ast_data": "FunctionDef name:run arg:self arg:num_epochs arguments arg arg For Call Call If Compare Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_and_export_metrics",
    "source_code": "def _convert_and_export_metrics(self, convert_func, *args, **kwargs):\n    self._increase_conversion_attempt_metric()\n    self._save_conversion_params_metric()\n    start_time = time.process_time()\n    result = convert_func(self, *args, **kwargs)\n    elapsed_time_ms = (time.process_time() - start_time) * 1000\n    if result:\n        self._increase_conversion_success_metric()\n    self._set_conversion_latency_metric(round(elapsed_time_ms))\n    self._tflite_metrics.export_metrics()\n    if self.exclude_conversion_metadata or self._experimental_use_buffer_offset:\n        return result\n    model_object = flatbuffer_utils.convert_bytearray_to_object(result)\n    if _check_model_use_buffer_offset(model_object):\n        return result\n    sparsity_modes = _get_sparsity_modes(model_object)\n    model_hash = _get_model_hash(model_object)\n    self._metadata.options.modelOptimizationModes.extend(sparsity_modes)\n    self._metadata.environment.modelHash = model_hash\n    model_object = _populate_conversion_metadata(model_object, self._metadata)\n    return flatbuffer_utils.convert_object_to_bytearray(model_object)",
    "docstring": "Wraps around convert function to export metrics. Args: convert_func: The convert function to wrap. *args: Positional arguments of the convert function. **kwargs: The keyword arguments of the convert function. Returns: The decorator to wrap the convert function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_convert_and_export_metrics arg:self arg:convert_func arguments arg arg arg arg Call Call Assign Call Assign Call Assign Call If Call Call Call Call If BoolOp Return return:yes Assign Call If Call Return return:yes Assign Call Assign Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "edge_centers",
    "source_code": "@property\ndef edge_centers(self):\n    x0, y0, width, height = self._rect_bbox\n    w = width / 2.0\n    h = height / 2.0\n    xe = (x0, x0 + w, x0 + width, x0 + w)\n    ye = (y0 + h, y0, y0 + h, y0 + height)\n    transform = self._get_rotation_transform()\n    coords = transform.transform(np.array([xe, ye]).T).T\n    return (coords[0], coords[1])",
    "docstring": "Midpoint of rectangle edges in data coordinates from left, moving anti-clockwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:edge_centers arg:self arguments arg Assign Assign Assign Assign Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ThetaFormatter",
    "source_code": "class ThetaFormatter(Formatter):\n\n    def __init__(self, round_to=1.0):\n        self._round_to = round_to\n\n    def __call__(self, x, pos=None):\n        degrees = round(np.rad2deg(x) / self._round_to) * self._round_to\n        return f'{degrees:0.0f}°'",
    "docstring": "Used to format the theta tick labels. Converts the native unit of radians into degrees and adds a degree symbol.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "ClassDef name:ThetaFormatter FunctionDef name:__init__ arg:self arg:round_to arguments arg arg Assign FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Assign Call Call Return return:yes"
  },
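  The formatting recipe is just rad-to-deg conversion plus snapping to a grid; a standalone sketch of the same arithmetic:

  ```python
  import numpy as np

  def format_theta(x, round_to=1.0):
      # Convert radians to degrees, snap to the nearest multiple of
      # round_to, and append the degree symbol.
      degrees = round(np.rad2deg(x) / round_to) * round_to
      return f"{degrees:0.0f}°"

  print(format_theta(np.pi / 2))        # 90°
  print(format_theta(1.0, round_to=5))  # 55° (57.3° snapped to the 5° grid)
  ```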
  {
    "library": "pytorch",
    "name": "enumerate_support",
    "source_code": "def enumerate_support(self, expand: bool=True) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Returns tensor containing all values supported by a discrete distribution. The result will enumerate over dimension 0, so the shape of the result will be (where for univariate distributions). Note that this enumerates over all batched tensors in lock-step . With , enumeration happens along dim 0, but with the remaining batch dimensions being singleton dimensions, . To iterate over the full Cartesian product use . Args: expand (bool): whether to expand the support over the batch dims to match the distribution's . Returns: Tensor iterating over dimension 0.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:enumerate_support arg:self arg:expand arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_deserialize_keras_tensor",
    "source_code": "def _deserialize_keras_tensor(t):\n    if isinstance(t, tf_utils.ListWrapper):\n        t = t.as_list()\n        layer_name = t[0]\n        node_index = t[1]\n        tensor_index = t[2]\n        layer = layer_map[layer_name]\n        new_node_index = get_node_index(layer, node_index)\n        if new_node_index is None:\n            raise IndexError\n        node = layer._inbound_nodes[new_node_index]\n        return nest.flatten(node.outputs)[tensor_index]\n    return t",
    "docstring": "Deserializes a single Keras Tensor passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_deserialize_keras_tensor arg:t arguments arg If Call Assign Call Assign Assign Assign Assign Assign Call If Compare Raise Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "msvc_version",
    "source_code": "def msvc_version(compiler):\n    if not compiler.compiler_type == 'msvc':\n        raise ValueError('Compiler instance is not msvc (%s)' % compiler.compiler_type)\n    return compiler._MSVCCompiler__version",
    "docstring": "Return version major and minor of compiler instance if it is MSVC, raise an exception otherwise.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:msvc_version arg:compiler arguments arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "The serialized bytes of the private key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
  {
    "library": "numpy",
    "name": "__int__",
    "source_code": "def __int__(self):\n    if self.size > 1:\n        raise TypeError('Only length-1 arrays can be converted to Python scalars')\n    elif self._mask:\n        raise MaskError('Cannot convert masked element to a Python int.')\n    return int(self.item())",
    "docstring": "Convert to int.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__int__ arg:self arguments arg If Compare Raise Call If Raise Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_constrained_layout_pads",
    "source_code": "@_api.deprecated('3.6', alternative='fig.get_layout_engine().get()', pending=True)\ndef get_constrained_layout_pads(self, relative=False):\n    if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine):\n        return (None, None, None, None)\n    info = self.get_layout_engine().get()\n    w_pad = info['w_pad']\n    h_pad = info['h_pad']\n    wspace = info['wspace']\n    hspace = info['hspace']\n    if relative and (w_pad is not None or h_pad is not None):\n        renderer = self._get_renderer()\n        dpi = renderer.dpi\n        w_pad = w_pad * dpi / renderer.width\n        h_pad = h_pad * dpi / renderer.height\n    return (w_pad, h_pad, wspace, hspace)",
    "docstring": "Get padding for `constrainedlayout_guideTrue`, then convert from inches to figure relative.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_constrained_layout_pads arg:self arg:relative arguments arg arg If Call Call Return return:no Assign Call Call Assign Assign Assign Assign If BoolOp BoolOp Compare Compare Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_numpy_array",
    "source_code": "def to_numpy_array(self):\n    return encode_resource_handle(self._get_resource_handle())",
    "docstring": "Convert a TensorHandle object to a feedable numpy value. Returns: A numpy array of a custom struct type that can be used as a feed value to run().",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:to_numpy_array arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "onnx_impl",
    "source_code": "def onnx_impl(target: _registration.TorchOp | tuple[_registration.TorchOp, ...], *, trace_only: bool=False, complex: bool=False, opset_introduced: int=18, no_compile: bool=False, private: bool=False) -> Callable[[_T], _T]:\n    if isinstance(target, torch._ops.OpOverloadPacket):\n        raise TypeError(f\"Target '{target}' should be provided as an OpOverload instead of an OpOverloadPacket. You can get the default overload with <op>.default\")\n\n    def wrapper(func: _T) -> _T:\n        processed_func: Any\n        if no_compile:\n            processed_func = func\n        else:\n            torchlib_opset = onnxscript.values.Opset(domain=_constants.TORCHLIB_DOMAIN, version=1)\n            if not trace_only:\n                processed_func = onnxscript.script(opset=torchlib_opset)(func)\n            else:\n                processed_func = onnxscript.TracedOnnxFunction(torchlib_opset, func)\n        if not private:\n            if not isinstance(target, Sequence):\n                targets = (target,)\n            else:\n                targets = target\n            for t in targets:\n                _registry.append(_registration.OnnxDecompMeta(onnx_function=processed_func, fx_target=t, signature=None, is_complex=complex, opset_introduced=opset_introduced, skip_signature_inference=no_compile))\n        return processed_func\n    return wrapper",
    "docstring": "Register an ONNX implementation of a torch op.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\_torchlib_registry.py",
    "ast_data": "FunctionDef name:onnx_impl arg:target arguments arg arg arg arg arg arg If Call Raise Call FunctionDef name:wrapper arg:func arguments arg If Assign Assign Call If Assign Call Call Assign Call If If Call Assign Assign For Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "toggle_collection_dynamic",
    "source_code": "def toggle_collection_dynamic(self, enable: bool, activities: Iterable[ProfilerActivity]):\n    if not self.profiler:\n        return\n    self.profiler.toggle_collection_dynamic(enable, activities)",
    "docstring": "Toggle collection of activities on/off at any point of collection. Currently supports toggling Torch Ops (CPU) and CUDA activity supported in Kineto Args: activities (iterable): list of activity groups to use in profiling, supported values: `` Examples: .. code-block:: python with torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ] ) as p: code_to_profile_0() // turn off collection of all CUDA activity p.toggle_collection_dynamic(False, [torch.profiler.ProfilerActivity.CUDA]) code_to_profile_1() // turn on collection of all CUDA activity p.toggle_collection_dynamic(True, [torch.profiler.ProfilerActivity.CUDA]) code_to_profile_2() print(p.key_averages().table( sort_by=\"self_cuda_time_total\", row_limit=-1))",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:toggle_collection_dynamic arg:self arg:enable arg:activities arguments arg arg arg If Return return:no Call"
  },
  {
    "library": "scipy",
    "name": "Infinity",
    "source_code": "class Infinity(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[1e-16 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(x ** 6.0 * (sin(1.0 / x) + 2.0))",
    "docstring": "Infinity objective function. This class defines the Infinity [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Infinity}}(x) = \\sum_{i=1}^{n} x_i^{6} \\left [ \\sin\\left ( \\frac{1}{x_i} \\right ) + 2 \\right ] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_I.py",
    "ast_data": "ClassDef name:Infinity Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
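  Evaluating the objective directly shows why x_i = 1e-16 is numerically optimal: the x_i**6 factor crushes the bounded sin(1/x_i) + 2 term. A standalone evaluation:

  ```python
  import numpy as np

  def infinity(x):
      # f(x) = sum_i x_i**6 * (sin(1/x_i) + 2); the bracket stays in
      # [1, 3], so the value is driven to zero as |x_i| -> 0.
      x = np.asarray(x, dtype=float)
      return np.sum(x ** 6.0 * (np.sin(1.0 / x) + 2.0))

  print(infinity([1e-16, 1e-16]))  # ~0.0, the global optimum
  print(infinity([0.5, -0.5]))     # 0.0625: the sin terms cancel pairwise
  ```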
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return torch._C._has_kleidiai",
    "docstring": "Return whether PyTorch is built with KleidiAI support.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\kleidiai\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "TRACE",
    "source_code": "def TRACE(msg):\n    cherrypy.log(msg, context='TOOLS.AUTH_DIGEST')",
    "docstring": "Log message in TOOLS.AUTH_DIGEST context.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py",
    "ast_data": "FunctionDef name:TRACE arg:msg arguments arg Call"
  },
  {
    "library": "django",
    "name": "SimpleUploadedFile",
    "source_code": "class SimpleUploadedFile(InMemoryUploadedFile):\n\n    def __init__(self, name, content, content_type='text/plain'):\n        content = content or b''\n        super().__init__(BytesIO(content), None, name, content_type, len(content), None, None)\n\n    @classmethod\n    def from_dict(cls, file_dict):\n        return cls(file_dict['filename'], file_dict['content'], file_dict.get('content-type', 'text/plain'))",
    "docstring": "A simple representation of a file, which just has content, size, and a name.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadedfile.py",
    "ast_data": "ClassDef name:SimpleUploadedFile FunctionDef name:__init__ arg:self arg:name arg:content arg:content_type arguments arg arg arg arg Assign BoolOp Call Call Call Call FunctionDef name:from_dict arg:cls arg:file_dict arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_logs_specs_class",
    "source_code": "def _get_logs_specs_class(logs_specs_name: Optional[str]) -> type[LogsSpecs]:\n    logs_specs_cls = None\n    if logs_specs_name is not None:\n        eps = metadata.entry_points()\n        if hasattr(eps, 'select'):\n            group = eps.select(group='torchrun.logs_specs')\n            if group.select(name=logs_specs_name):\n                logs_specs_cls = group[logs_specs_name].load()\n        elif (specs := eps.get('torchrun.logs_specs')):\n            if (entrypoint_list := [ep for ep in specs if ep.name == logs_specs_name]):\n                logs_specs_cls = entrypoint_list[0].load()\n        if logs_specs_cls is None:\n            raise ValueError(f\"Could not find entrypoint under 'torchrun.logs_specs[{logs_specs_name}]' key\")\n        logger.info(\"Using logs_spec '%s' mapped to %s\", logs_specs_name, str(logs_specs_cls))\n    else:\n        logs_specs_cls = DefaultLogsSpecs\n    return logs_specs_cls",
    "docstring": "Attemps to load entrypoint with key of param. Provides plugin mechanism to provide custom implementation of LogsSpecs. Returns when logs_spec_name is None. Raises ValueError when entrypoint for can't be found in entrypoints.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\run.py",
    "ast_data": "FunctionDef name:_get_logs_specs_class arg:logs_specs_name arguments arg Assign If Compare Assign Call If Call Assign Call If Call Assign Call If Call If Compare Assign Call If Compare Raise Call Call Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "UnreferencedFootnotesDetector",
    "source_code": "class UnreferencedFootnotesDetector(SphinxTransform):\n    default_priority = Footnotes.default_priority + 2\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in self.document.footnotes:\n            if not node['backrefs'] and node['names']:\n                logger.warning(__('Footnote [%s] is not referenced.'), node['names'][0] if node['names'] else node['dupnames'][0], type='ref', subtype='footnote', location=node)\n        for node in self.document.symbol_footnotes:\n            if not node['backrefs']:\n                logger.warning(__('Footnote [*] is not referenced.'), type='ref', subtype='footnote', location=node)\n        for node in self.document.autofootnotes:\n            if not node['backrefs'] and node['names']:\n                logger.warning(__('Footnote [#] is not referenced.'), type='ref', subtype='footnote', location=node)",
    "docstring": "Detect unreferenced footnotes and emit warnings",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:UnreferencedFootnotesDetector Assign FunctionDef name:apply arg:self arguments arg arg For If BoolOp Call Call For If Call Call For If BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reset_from_key_counter",
    "source_code": "def reset_from_key_counter(self, key, counter):\n    counter = _convert_to_state_tensor(counter)\n    key = _convert_to_state_tensor(key)\n    counter.shape.assert_is_compatible_with([_get_state_size(self.algorithm) - 1])\n    key.shape.assert_is_compatible_with([])\n    key = array_ops.reshape(key, [1])\n    state = array_ops.concat([counter, key], 0)\n    self._state_var.assign(state)",
    "docstring": "Resets the generator by a new key-counter pair. See for the meaning of \"key\" and \"counter\". Args: key: the new key. counter: the new counter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:reset_from_key_counter arg:self arg:key arg:counter arguments arg arg arg Assign Call Assign Call Call Call Call Assign Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "get_source_files",
    "source_code": "def get_source_files(source_path: str) -> typing.Generator[str, None, None]:\n    for root, dirs, fnames in os.walk(source_path):\n        root_rel_path = os.path.relpath(root, source_path)\n        for fname in fnames:\n            yield os.path.join(root_rel_path, fname)",
    "docstring": "Generate the list of files present in the source directory.",
    "type": "function",
    "file_path": "pandas\\web\\pandas_web.py",
    "ast_data": "FunctionDef name:get_source_files arg:source_path arguments arg For Call Assign Call For Call"
  },
  {
    "library": "numpy",
    "name": "have_f77c",
    "source_code": "def have_f77c(self):\n    simple_fortran_subroutine = '\\n        subroutine simple\\n        end\\n        '\n    config_cmd = self.get_config_cmd()\n    flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')\n    return flag",
    "docstring": "Check for availability of Fortran 77 compiler. Use it inside source generating function to ensure that setup distribution instance has been initialized. Notes ----- True if a Fortran 77 compiler is available (because a simple Fortran 77 code was able to be compiled successfully).",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:have_f77c arg:self arguments arg Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "model_ngettext",
    "source_code": "def model_ngettext(obj, n=None):\n    if isinstance(obj, models.query.QuerySet):\n        if n is None:\n            n = obj.count()\n        obj = obj.model\n    d = model_format_dict(obj)\n    singular, plural = (d['verbose_name'], d['verbose_name_plural'])\n    return ngettext(singular, plural, n or 0)",
    "docstring": "Return the appropriate or value for depending on the count . may be a instance, subclass, or instance. If is a instance, is optional and the length of the is used.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:model_ngettext arg:obj arg:n arguments arg arg If Call If Compare Assign Call Assign Assign Call Assign Return return:yes Call BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "__rmul__",
    "source_code": "def __rmul__(self, other):\n    return self * other",
    "docstring": "Returns the product of and . Args: other: Another Dimension, or a value accepted by . Returns: A Dimension whose value is the product of and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__rmul__ arg:self arg:other arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "local_devices_from_num_gpus",
    "source_code": "def local_devices_from_num_gpus(num_gpus):\n    return tuple(('/device:GPU:%d' % i for i in range(num_gpus))) or ('/device:CPU:0',)",
    "docstring": "Returns device strings for local GPUs or CPU.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\device_util.py",
    "ast_data": "FunctionDef name:local_devices_from_num_gpus arg:num_gpus arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "align_trace_from_beginning",
    "source_code": "def align_trace_from_beginning(entries: dict[int, list[dict[str, Any]]]) -> dict[int, list[dict[str, Any]]]:\n    maximum_starting_record_id = 0\n    for rank in entries:\n        first_record_id = entries[rank][0]['record_id']\n        maximum_starting_record_id = max(maximum_starting_record_id, first_record_id)\n    for rank in entries:\n        entries[rank] = [entry for entry in entries[rank] if entry['record_id'] >= maximum_starting_record_id]\n    return entries",
    "docstring": "Align the trace entries by record ID for entries. This function takes a dictionary of rank names to lists of trace entries as input. Each trace entry is a dictionary containing information about a collective operation, including its unique identifier ( is monotonically increasing as we write into the ring buffer). The function finds the largest starting point across all ranks by taking the maximum value of the first entry in each rank. Finally, it filters out any entries with values less than the maximum starting point. The function returns the updated dictionary of sorted and filtered trace entries. Args: entries (Dict[str, List[Dict[str, Any]]]): A dictionary of rank names to lists of trace entries. Returns: entries (Dict[str, List[Dict[str, Any]]]): Entries sorted by record ID and filtered by the maximum starting point.",
    "type": "function",
    "file_path": "pytorch\\tools\\flight_recorder\\components\\utils.py",
    "ast_data": "FunctionDef name:align_trace_from_beginning arg:entries arguments arg Assign For Assign Assign Call For Assign Compare Return return:yes"
  },
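  A toy run of the alignment: rank 1's ring buffer has already wrapped, so every rank is trimmed back to the largest first record_id (the data here is made up for illustration):

  ```python
  entries = {
      0: [{"record_id": i} for i in range(5)],     # kept records 0..4
      1: [{"record_id": i} for i in range(2, 5)],  # buffer starts at 2
  }

  start = max(rank_entries[0]["record_id"] for rank_entries in entries.values())
  aligned = {
      rank: [e for e in rank_entries if e["record_id"] >= start]
      for rank, rank_entries in entries.items()
  }
  print([e["record_id"] for e in aligned[0]])  # [2, 3, 4] -- common window
  ```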
  {
    "library": "scrapy",
    "name": "request_httprepr",
    "source_code": "def request_httprepr(request: Request) -> bytes:\n    parsed = urlparse_cached(request)\n    path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, ''))\n    s = to_bytes(request.method) + b' ' + to_bytes(path) + b' HTTP/1.1\\r\\n'\n    s += b'Host: ' + to_bytes(parsed.hostname or b'') + b'\\r\\n'\n    if request.headers:\n        s += request.headers.to_string() + b'\\r\\n'\n    s += b'\\r\\n'\n    s += request.body\n    return s",
    "docstring": "Return the raw HTTP representation (as bytes) of the given request. This is provided only for reference since it's not the actual stream of bytes that will be send when performing the request (that's controlled by Twisted).",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\request.py",
    "ast_data": "FunctionDef name:request_httprepr arg:request arguments arg Assign Call Assign Call BoolOp Assign Call Call Call BoolOp If Call Return return:yes"
  },
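  The same wire format can be sketched without Scrapy, using only the standard library (the helper `http_repr` and its defaults are illustrative):

  ```python
  from urllib.parse import urlparse, urlunparse

  def http_repr(method, url, headers=None, body=b""):
      # Request line, Host header, any extra headers, then the blank
      # line that separates the headers from the body.
      parsed = urlparse(url)
      path = urlunparse(("", "", parsed.path or "/", parsed.params, parsed.query, ""))
      lines = [f"{method} {path} HTTP/1.1", f"Host: {parsed.hostname or ''}"]
      lines += [f"{k}: {v}" for k, v in (headers or {}).items()]
      return "\r\n".join(lines).encode() + b"\r\n\r\n" + body

  print(http_repr("GET", "https://example.com/a?b=1"))
  # b'GET /a?b=1 HTTP/1.1\r\nHost: example.com\r\n\r\n'
  ```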
  {
    "library": "tensorflow",
    "name": "random_normal",
    "source_code": "def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):\n    if self.seed:\n        op = stateless_random_ops.stateless_random_normal\n    else:\n        op = random_ops.random_normal\n    return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)",
    "docstring": "A deterministic random normal if seed is passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:random_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, dataset=None, worker=None, devices=None, components=None, element_spec=None, options=None, canonicalize_devices=None):\n    if worker is None or devices is None:\n        raise ValueError('Both `worker` and `devices` should be provided')\n    error_message = 'Either `dataset` or both `components` and `element_spec` need to be provided.'\n    self._options = options\n    self._canonicalize_devices = canonicalize_devices\n    if dataset is None:\n        if components is None or element_spec is None:\n            raise ValueError(error_message)\n        self._element_spec = element_spec\n        self._worker = worker\n        self._devices = devices\n        self._iterator = components[0]\n    else:\n        if components is not None or element_spec is not None:\n            raise ValueError(error_message)\n        super(_SingleWorkerOwnedDatasetIterator, self).__init__(dataset, worker, devices, self._options)",
    "docstring": "Create iterator for the to fetch data to worker's . is used to prefetch input to the devices on the given worker. The lifetime of this iterator is tied to the encompassing python object. Once we go out of scope of the python object or return from a tf.function the underlying iterator resource is deleted. Args: dataset: A instance. worker: Worker on which ops should be created. devices: Distribute data from to these devices. components: Tensor components to construct the _SingleWorkerOwnedDatasetIterator from. element_spec: A nested structure of objects that represents the type specification of elements of the iterator. options: used to control options on how this dataset is distributed. canonicalize_devices: Whether to canonicalize devices for workers fully or partially. If False, it will partially canonicalize devices by removing job and task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dataset arg:worker arg:devices arg:components arg:element_spec arg:options arg:canonicalize_devices arguments arg arg arg arg arg arg arg arg If BoolOp Compare Compare Raise Call Assign Assign Assign If Compare If BoolOp Compare Compare Raise Call Assign Assign Assign Assign If BoolOp Compare Compare Raise Call Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, input: Tensor, params: Optional[List[ParamItem]]=None, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n    if len(input.shape) != 5:\n        raise AssertionError(f'Input must be a 5-dim tensor. Got {input.shape}.')\n    if params is None:\n        self._params = self.forward_parameters(input.shape)\n        params = self._params\n    output = self.transform_inputs(input, params, extra_args=extra_args)\n    return output",
    "docstring": "Define the video computation performed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\video.py",
    "ast_data": "FunctionDef name:forward arg:self arg:input arg:params arg:extra_args arguments arg arg arg arg If Compare Call Raise Call If Compare Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, X, Y=None, eval_gradient=False):\n    if eval_gradient:\n        K = []\n        K_grad = []\n        for kernel in self.kernels:\n            K_single, K_grad_single = kernel(X, Y, eval_gradient)\n            K.append(K_single)\n            K_grad.append(K_grad_single[..., np.newaxis])\n        return (np.dstack(K), np.concatenate(K_grad, 3))\n    else:\n        return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])",
    "docstring": "Return the kernel k(X, Y) and optionally its gradient. Note that this compound kernel returns the results of all simple kernel stacked along an additional axis. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object, default=None Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims, n_kernels), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:X arg:Y arg:eval_gradient arguments arg arg arg arg If Assign Assign For Assign Call Call Call Return return:yes Call Call Return return:yes Call Call"
  },
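  The stacking behavior described above can be observed directly (a minimal sketch assuming scikit-learn is installed; `CompoundKernel` is importable from `sklearn.gaussian_process.kernels`):

  ```python
  import numpy as np
  from sklearn.gaussian_process.kernels import RBF, CompoundKernel, WhiteKernel

  X = np.random.default_rng(0).normal(size=(5, 2))
  kernel = CompoundKernel([RBF(length_scale=1.0), WhiteKernel(noise_level=0.1)])

  K = kernel(X)
  print(K.shape)       # (5, 5, 2) -- one slice per simple kernel
  K, K_grad = kernel(X, eval_gradient=True)
  print(K_grad.shape)  # (5, 5, 1, 2) -- one hyperparameter dim per kernel here
  ```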
  {
    "library": "pandas",
    "name": "_is_all_dates",
    "source_code": "@cache_readonly\n@final\ndef _is_all_dates(self) -> bool:\n    if needs_i8_conversion(self.dtype):\n        return True\n    elif self.dtype != _dtype_obj:\n        return False\n    elif self._is_multi:\n        return False\n    return is_datetime_array(ensure_object(self._values))",
    "docstring": "Whether or not the index values only consist of dates.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_is_all_dates arg:self arguments arg If Call Return return:yes If Compare Return return:yes If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "make_graph_return_tuple",
    "source_code": "def make_graph_return_tuple(gm: GraphModule, inputs: Sequence[InputType], compile_gm: Callable[..., Any]) -> Callable[..., Any]:\n    node = output_node(gm)\n    rv, = node.args\n    rv, spec = pytree.tree_flatten(rv)\n    with gm.graph.inserting_before(node):\n        gm.graph.output(rv)\n    gm.graph.erase_node(node)\n    assert graph_returns_tuple(gm)\n    compiled_fn = compile_gm(gm, inputs)\n\n    @functools.wraps(compiled_fn)\n    def wrapper(*args: Any, **kwargs: Any) -> Any:\n        return pytree.tree_unflatten(compiled_fn(*args, **kwargs), spec)\n    return wrapper",
    "docstring": "Mutate gm so it returns a tuple. This is only needed for graphs not created by torchdynamo that return non-tuples.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "FunctionDef name:make_graph_return_tuple arg:gm arg:inputs arg:compile_gm arguments arg arg arg Assign Call Assign Assign Call With Call Call Call Call Assign Call FunctionDef name:wrapper arguments arg arg Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "reset_accumulated_memory_stats",
    "source_code": "def reset_accumulated_memory_stats(device: _device_t=None) -> None:\n    device = _get_device_index(device, optional=True)\n    return torch._C._xpu_resetAccumulatedMemoryStats(device)",
    "docstring": "Reset the \"accumulated\" (historical) stats tracked by the XPU memory allocator. See :func: for details. Accumulated stats correspond to the and keys in each individual stat dict. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:reset_accumulated_memory_stats arg:device arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_inverse_offsets",
    "source_code": "def get_inverse_offsets(offsets: TensorBox, jagged_len: Union[int, sympy.Expr], realize: bool=True) -> TensorBox:\n    if hasattr(offsets, 'inverse_offsets'):\n        return offsets.inverse_offsets\n    offsets.realize()\n    device: torch.device = offsets.get_device_or_error()\n    dtype: torch.dtype = offsets.get_dtype()\n\n    def inner_fn(index):\n        idx = index[0]\n        bucket = ops.bucketize(values=ops.index_expr(idx, dtype), boundaries=(offsets.get_name(), offsets.get_size()[-1], offsets.get_size()[0] * offsets.get_stride()[0], offsets.get_stride()[-1]), boundary_indices=0, indexing_dtype=dtype, right=True)\n        return bucket - 1\n    inverse_offsets = Pointwise.create(device=device, dtype=dtype, inner_fn=inner_fn, ranges=[jagged_len])\n    if realize:\n        inverse_offsets.realize()\n    offsets.inverse_offsets = inverse_offsets\n    return inverse_offsets",
    "docstring": "Returns \"inverse_offsets\" - the inverse of the offsets array. offsets maps batch index (dense) to jagged index (i.e. offset into jagged tensor). inverse_offsets maps jagged index to batch index. e.g. for offsets [0, 3, 4, 9, 10] this will return inverse_offsets = [0, 0, 0, 1, 2, 2, 2, 2, 2, 3] For the given offsets, the computed inverse_offsets are cached on the first call and reused in the further calls.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\jagged_lowerings.py",
    "ast_data": "FunctionDef name:get_inverse_offsets arg:offsets arg:jagged_len arg:realize arguments arg arg arg If Call Return return:yes Call Call Call FunctionDef name:inner_fn arg:index arguments arg Assign Assign Call Call Call Call Call Call Call Return return:yes Assign Call If Call Assign Return return:yes"
  },
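  The offsets example from the docstring, reproduced in plain NumPy; both the repeat form and the right-sided bucketize form that the lowering uses yield the same mapping:

  ```python
  import numpy as np

  offsets = np.array([0, 3, 4, 9, 10])
  lengths = np.diff(offsets)                       # [3, 1, 5, 1]
  inverse = np.repeat(np.arange(len(lengths)), lengths)
  print(inverse)  # [0 0 0 1 2 2 2 2 2 3]

  # Equivalent: right-sided searchsorted (bucketize) minus one.
  print(np.searchsorted(offsets, np.arange(offsets[-1]), side="right") - 1)
  ```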
  {
    "library": "tensorflow",
    "name": "KerasModelTypeCombination",
    "source_code": "class KerasModelTypeCombination(test_combinations.TestCombination):\n\n    def context_managers(self, kwargs):\n        model_type = kwargs.pop('model_type', None)\n        if model_type in KERAS_MODEL_TYPES:\n            return [testing_utils.model_type_scope(model_type)]\n        else:\n            return []\n\n    def parameter_modifiers(self):\n        return [test_combinations.OptionalParameter('model_type')]",
    "docstring": "Combination for Keras model types when doing model test. It by default includes 'functional', 'subclass', 'sequential'. Various methods in to get models will auto-generate a model of the currently active Keras model type. This allows unittests to confirm the equivalence between different Keras models.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\combinations.py",
    "ast_data": "ClassDef name:KerasModelTypeCombination FunctionDef name:context_managers arg:self arg:kwargs arguments arg arg Assign Call If Compare Return return:yes Call Return return:no FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "Backend",
    "source_code": "class Backend(Enum):\n    ARRAY_API_STRICT = ('array_api_strict', _compat.is_array_api_strict_namespace)\n    NUMPY = ('numpy', _compat.is_numpy_namespace)\n    NUMPY_READONLY = ('numpy_readonly', _compat.is_numpy_namespace)\n    CUPY = ('cupy', _compat.is_cupy_namespace)\n    TORCH = ('torch', _compat.is_torch_namespace)\n    DASK = ('dask.array', _compat.is_dask_namespace)\n    SPARSE = ('sparse', _compat.is_pydata_sparse_namespace)\n    JAX = ('jax.numpy', _compat.is_jax_namespace)\n\n    def __new__(cls, value: str, _is_namespace: Callable[[ModuleType], bool]):\n        obj = object.__new__(cls)\n        obj._value_ = value\n        return obj\n\n    def __init__(self, value: str, is_namespace: Callable[[ModuleType], bool]):\n        self.is_namespace = is_namespace\n\n    def __str__(self) -> str:\n        return cast(str, self.value)",
    "docstring": "All array library backends explicitly tested by array-api-extra. Parameters ---------- value : str Name of the backend's module. is_namespace : Callable[[ModuleType], bool] Function to check whether an input module is the array namespace corresponding to the backend.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_backends.py",
    "ast_data": "ClassDef name:Backend Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__new__ arg:cls arg:value arg:_is_namespace arguments arg arg arg Assign Call Assign Return return:yes FunctionDef name:__init__ arg:self arg:value arg:is_namespace arguments arg arg arg Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "device_count",
    "source_code": "@lru_cache(maxsize=1)\ndef device_count() -> int:\n    if not _is_compiled():\n        return 0\n    return torch._C._xpu_getDeviceCount()",
    "docstring": "Return the number of XPU device available.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:device_count arguments If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_notice_to_docstring",
    "source_code": "def add_notice_to_docstring(doc, instructions, no_doc_str, suffix_str, notice, notice_type='Warning'):\n    allowed_notice_types = ['Deprecated', 'Warning', 'Caution', 'Important', 'Note']\n    if notice_type not in allowed_notice_types:\n        raise ValueError(f'Unrecognized notice type. Should be one of: {allowed_notice_types}')\n    if not doc:\n        lines = [no_doc_str]\n    else:\n        lines = _normalize_docstring(doc).splitlines()\n        lines[0] += ' ' + suffix_str\n    if not notice:\n        raise ValueError('The `notice` arg must not be empty.')\n    notice[0] = f'{notice_type}: {notice[0]}'\n    notice = [''] + notice + ([instructions] if instructions else [])\n    if len(lines) > 1:\n        if lines[1].strip():\n            notice.append('')\n        lines[1:1] = notice\n    else:\n        lines += notice\n    return '\\n'.join(lines)",
    "docstring": "Adds a deprecation notice to a docstring. Args: doc: The original docstring. instructions: A string, describing how to fix the problem. no_doc_str: The default value to use for if is empty. suffix_str: Is added to the end of the first line. notice: A list of strings. The main notice warning body. notice_type: The type of notice to use. Should be one of Returns: A new docstring, with the notice attached. Raises: ValueError: If is empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\decorator_utils.py",
    "ast_data": "FunctionDef name:add_notice_to_docstring arg:doc arg:instructions arg:no_doc_str arg:suffix_str arg:notice arg:notice_type arguments arg arg arg arg arg arg Assign If Compare Raise Call If Assign Assign Call Call If Raise Call Assign Assign If Compare Call If Call Call Assign Return return:yes Call"
  },
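  A pared-down version of the splicing logic, for intuition (the helper `add_notice` is illustrative, not the TensorFlow API):

  ```python
  def add_notice(doc, notice, notice_type="Warning"):
      # Prefix the first notice line with its type, then splice the
      # block in after the docstring's summary line.
      notice = [f"{notice_type}: {notice[0]}", *notice[1:]]
      lines = doc.splitlines() or [""]
      return "\n".join([lines[0], "", *notice, *lines[1:]])

  print(add_notice("Do a thing.", ["Use new_thing() instead."], "Deprecated"))
  # Do a thing.
  #
  # Deprecated: Use new_thing() instead.
  ```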
  {
    "library": "matplotlib",
    "name": "get_ticks_direction",
    "source_code": "def get_ticks_direction(self, minor=False):\n    if minor:\n        return np.array([tick._tickdir for tick in self.get_minor_ticks()])\n    else:\n        return np.array([tick._tickdir for tick in self.get_major_ticks()])",
    "docstring": "Return an array of this Axis' tick directions. Parameters ---------- minor : bool, default: False True to return the minor tick directions, False to return the major tick directions. Returns ------- array of tick directions",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_ticks_direction arg:self arg:minor arguments arg arg If Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "show",
    "source_code": "def show(self, **kwargs) -> None:\n    import matplotlib.pyplot as plt\n    with theme_context(self._theme):\n        plt.show(**kwargs)",
    "docstring": "Display the plot by hooking into pyplot. This method calls :func: with any keyword parameters.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:show arg:self arguments arg arg With Call Call"
  },
  {
    "library": "pytorch",
    "name": "_make_closure_patcher",
    "source_code": "def _make_closure_patcher(self, **changes: dict[str, Any]) -> Any:\n    config = self._config\n\n    def change() -> Callable[[], None]:\n        prior = {k: config[k].user_override for k in changes}\n        for k, v in changes.items():\n            self._config[k].user_override = v\n\n        def revert() -> None:\n            for k, v in prior.items():\n                self._config[k].user_override = v\n        return revert\n    return change",
    "docstring": "A lower-overhead version of patch() for things on the critical path. Usage: # do this off the critical path change_fn = config.make_closure_patcher(foo=True) ... revert = change_fn() try: ... finally: revert()",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:_make_closure_patcher arg:self arguments arg arg Assign FunctionDef name:change arguments Assign For Call Assign FunctionDef name:revert arguments For Call Assign Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "gray",
    "source_code": "def gray() -> None:\n    set_cmap('gray')",
    "docstring": "Set the colormap to 'gray'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:gray arguments Call"
  },
  {
    "library": "scipy",
    "name": "aps05_f",
    "source_code": "def aps05_f(x):\n    return np.sin(x) - 1.0 / 2",
    "docstring": "Simple Trigonometric function",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps05_f arg:x arguments arg Return return:yes Call"
  },
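Since `aps05_f` is a root-finding test function, a quick check (hedged, using the standard SciPy API) is that its root on `[0, 1.5]` is `pi/6`, where `sin(x) = 1/2`:

```python
import numpy as np
from scipy.optimize import brentq

def aps05_f(x):
    return np.sin(x) - 1.0 / 2

root = brentq(aps05_f, 0.0, 1.5)      # bracketing interval contains pi/6
assert abs(root - np.pi / 6) < 1e-10
```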
  {
    "library": "tensorflow",
    "name": "_create_weighted_sum",
    "source_code": "def _create_weighted_sum(column, transformation_cache, state_manager, sparse_combiner, weight_var):\n    if isinstance(column, CategoricalColumn):\n        return _create_categorical_column_weighted_sum(column=column, transformation_cache=transformation_cache, state_manager=state_manager, sparse_combiner=sparse_combiner, weight_var=weight_var)\n    else:\n        return _create_dense_column_weighted_sum(column=column, transformation_cache=transformation_cache, state_manager=state_manager, weight_var=weight_var)",
    "docstring": "Creates a weighted sum for a dense/categorical column for linear_model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_create_weighted_sum arg:column arg:transformation_cache arg:state_manager arg:sparse_combiner arg:weight_var arguments arg arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "append_to",
    "source_code": "def append_to(self, extlib):\n    if is_sequence(extlib):\n        lib_name, build_info = extlib\n        dict_append(build_info, libraries=self.libraries, include_dirs=self.include_dirs)\n    else:\n        from numpy.distutils.core import Extension\n        assert isinstance(extlib, Extension), repr(extlib)\n        extlib.libraries.extend(self.libraries)\n        extlib.include_dirs.extend(self.include_dirs)",
    "docstring": "Append libraries, include_dirs to extension or library item.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:append_to arg:self arg:extlib arguments arg arg If Call Assign Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "previous_friday",
    "source_code": "def previous_friday(dt: datetime) -> datetime:\n    if dt.weekday() == 5:\n        return dt - timedelta(1)\n    elif dt.weekday() == 6:\n        return dt - timedelta(2)\n    return dt",
    "docstring": "If holiday falls on Saturday or Sunday, use previous Friday instead.",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:previous_friday arg:dt arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes"
  },
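A short demonstration of the weekend-to-Friday adjustment (dates chosen so 2024-01-06/07 fall on Saturday/Sunday):

```python
from datetime import datetime
from pandas.tseries.holiday import previous_friday

assert previous_friday(datetime(2024, 1, 6)) == datetime(2024, 1, 5)  # Sat -> Fri
assert previous_friday(datetime(2024, 1, 7)) == datetime(2024, 1, 5)  # Sun -> Fri
assert previous_friday(datetime(2024, 1, 3)) == datetime(2024, 1, 3)  # Wed unchanged
```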
  {
    "library": "tensorflow",
    "name": "_calculate_scores",
    "source_code": "def _calculate_scores(self, query, key):\n    return NotImplementedError",
    "docstring": "Calculates attention scores. Args: query: Query tensor of shape . key: Key tensor of shape . Returns: Tensor of shape .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py",
    "ast_data": "FunctionDef name:_calculate_scores arg:self arg:query arg:key arguments arg arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_adapted_sampling",
    "source_code": "def _adapted_sampling(shape: Union[Tuple[int, ...], torch.Size], dist: torch.distributions.Distribution, same_on_batch: Optional[bool]=False) -> Tensor:\n    if isinstance(shape, tuple):\n        shape = torch.Size(shape)\n    if same_on_batch:\n        return dist.sample(torch.Size((1, *shape[1:]))).repeat(shape[0], *[1] * (len(shape) - 1))\n    return dist.sample(shape)",
    "docstring": "Sample from a uniform sampling function that accepts 'same_on_batch'. If same_on_batch is True, all values generated will be exactly same given a batch_size (shape[0]). By default, same_on_batch is set to False.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_adapted_sampling arg:shape arg:dist arg:same_on_batch arguments arg arg arg If Call Assign Call If Return return:yes Call Call Call Call Return return:yes Call"
  },
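The `same_on_batch` behavior can be reproduced with plain `torch.distributions` (a hedged restatement of the helper's logic, not a call into kornia):

```python
import torch

dist = torch.distributions.Uniform(0.0, 1.0)
shape = torch.Size((4, 3))

# same_on_batch=True: sample once for the non-batch dims, repeat over batch.
same = dist.sample(torch.Size((1, *shape[1:]))).repeat(shape[0], 1)
assert (same == same[0]).all()    # every batch element is identical

independent = dist.sample(shape)  # default: each element sampled separately
```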
  {
    "library": "matplotlib",
    "name": "find_control_points",
    "source_code": "def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):\n    cmx = 0.5 * (4 * mmx - (c1x + c2x))\n    cmy = 0.5 * (4 * mmy - (c1y + c2y))\n    return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]",
    "docstring": "Find control points of the Bézier curve passing through (*c1x*, *c1y*), (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\bezier.py",
    "ast_data": "FunctionDef name:find_control_points arg:c1x arg:c1y arg:mmx arg:mmy arg:c2x arg:c2y arguments arg arg arg arg arg arg Assign Assign Return return:yes"
  },
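A sanity check of the formula: with `cm = (4*mm - (c1 + c2)) / 2`, the quadratic Bezier `B(t) = (1-t)^2*c1 + 2t(1-t)*cm + t^2*c2` evaluates to `mm` at `t = 0.5` (assuming `find_control_points` is importable from `matplotlib.bezier`):

```python
from matplotlib.bezier import find_control_points

(c1x, c1y), (cmx, cmy), (c2x, c2y) = find_control_points(0, 0, 1, 2, 4, 0)
t = 0.5
bx = (1 - t) ** 2 * c1x + 2 * t * (1 - t) * cmx + t ** 2 * c2x
by = (1 - t) ** 2 * c1y + 2 * t * (1 - t) * cmy + t ** 2 * c2y
assert (bx, by) == (1.0, 2.0)  # the curve passes through (mmx, mmy)
```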
  {
    "library": "tensorflow",
    "name": "split_training_and_validation_data",
    "source_code": "def split_training_and_validation_data(x, y, sample_weights, validation_split):\n    if has_symbolic_tensors(x):\n        raise ValueError('If your data is in the form of symbolic tensors, you cannot use `validation_split`.')\n    if hasattr(x[0], 'shape'):\n        split_at = int(x[0].shape[0] * (1.0 - validation_split))\n    else:\n        split_at = int(len(x[0]) * (1.0 - validation_split))\n    x, val_x = (generic_utils.slice_arrays(x, 0, split_at), generic_utils.slice_arrays(x, split_at))\n    y, val_y = (generic_utils.slice_arrays(y, 0, split_at), generic_utils.slice_arrays(y, split_at))\n    if sample_weights:\n        sample_weights, val_sample_weights = (generic_utils.slice_arrays(sample_weights, 0, split_at), generic_utils.slice_arrays(sample_weights, split_at))\n    else:\n        val_sample_weights = None\n    return (x, y, sample_weights, val_x, val_y, val_sample_weights)",
    "docstring": "Split input data into train/eval section based on validation_split.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:split_training_and_validation_data arg:x arg:y arg:sample_weights arg:validation_split arguments arg arg arg arg If Call Raise Call If Call Assign Call Assign Call Call Assign Call Call Assign Call Call If Assign Call Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "SlowDownError",
    "source_code": "class SlowDownError(OAuth2Error):\n    error = 'slow_down'",
    "docstring": "A variant of \"authorization_pending\", the authorization request is still pending and polling should continue, but the interval MUST be increased by 5 seconds for this and all subsequent requests.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc8628\\errors.py",
    "ast_data": "ClassDef name:SlowDownError Assign"
  },
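A hedged sketch of how a device-flow client might honor this error; `poll_token_endpoint` is a hypothetical callable, not an Authlib API:

```python
import time

def poll_for_token(poll_token_endpoint, interval=5):
    while True:
        result = poll_token_endpoint()
        if result == "authorization_pending":
            pass                  # keep polling at the current interval
        elif result == "slow_down":
            interval += 5         # increase for this and all later requests
        else:
            return result         # a token, or a terminal error
        time.sleep(interval)
```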
  {
    "library": "tensorflow",
    "name": "_call_batch_hook",
    "source_code": "def _call_batch_hook(self, mode, hook, batch, logs=None):\n    if not self.callbacks:\n        return\n    if hook == 'begin':\n        self._call_batch_begin_hook(mode, batch, logs)\n    elif hook == 'end':\n        self._call_batch_end_hook(mode, batch, logs)\n    else:\n        raise ValueError('Unrecognized hook: {}'.format(hook))",
    "docstring": "Helper function for all batch_{begin | end} methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_call_batch_hook arg:self arg:mode arg:hook arg:batch arg:logs arguments arg arg arg arg arg If Return return:no If Compare Call If Compare Call Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "infer_dtype_from_array",
    "source_code": "def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]:\n    if isinstance(arr, np.ndarray):\n        return (arr.dtype, arr)\n    if not is_list_like(arr):\n        raise TypeError(\"'arr' must be list-like\")\n    arr_dtype = getattr(arr, 'dtype', None)\n    if isinstance(arr_dtype, ExtensionDtype):\n        return (arr.dtype, arr)\n    elif isinstance(arr, ABCSeries):\n        return (arr.dtype, np.asarray(arr))\n    inferred = lib.infer_dtype(arr, skipna=False)\n    if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']:\n        return (np.dtype(np.object_), arr)\n    arr = np.asarray(arr)\n    return (arr.dtype, arr)",
    "docstring": "Infer the dtype from an array. Parameters ---------- arr : array Returns ------- tuple (pandas-compat dtype, array) Examples -------- >>> np.asarray([1, \"1\"]) array(['1', '1'], dtype='>> infer_dtype_from_array([1, \"1\"]) (dtype('O'), [1, '1'])",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:infer_dtype_from_array arg:arr arguments arg If Call Return return:yes If Call Raise Call Assign Call If Call Return return:yes If Call Return return:yes Call Assign Call If Compare Return return:yes Call Assign Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "get_wheel",
    "source_code": "def get_wheel(distribution, version, for_py_version, search_dirs, download, app_data, do_periodic_update, env):\n    wheel = None\n    if not download or version != Version.bundle:\n        wheel = from_bundle(distribution, version, for_py_version, search_dirs, app_data, do_periodic_update, env)\n    if download and wheel is None and (version != Version.embed):\n        wheel = download_wheel(distribution=distribution, version_spec=Version.as_version_spec(version), for_py_version=for_py_version, search_dirs=search_dirs, app_data=app_data, to_folder=app_data.house, env=env)\n        if wheel is not None and app_data.can_update:\n            add_wheel_to_update_log(wheel, for_py_version, app_data)\n    return wheel",
    "docstring": "Get a wheel with the given distribution-version-for_py_version trio, by using the extra search dir + download.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\seed\\wheels\\acquire.py",
    "ast_data": "FunctionDef name:get_wheel arg:distribution arg:version arg:for_py_version arg:search_dirs arg:download arg:app_data arg:do_periodic_update arg:env arguments arg arg arg arg arg arg arg arg Assign If BoolOp Compare Assign Call If BoolOp Compare Compare Assign Call Call If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_set_mappable_flags",
    "source_code": "def _set_mappable_flags(self):\n    edge0 = self._edge_is_mapped\n    face0 = self._face_is_mapped\n    self._edge_is_mapped = False\n    self._face_is_mapped = False\n    if self._A is not None:\n        if not cbook._str_equal(self._original_facecolor, 'none'):\n            self._face_is_mapped = True\n            if cbook._str_equal(self._original_edgecolor, 'face'):\n                self._edge_is_mapped = True\n        elif self._original_edgecolor is None:\n            self._edge_is_mapped = True\n    mapped = self._face_is_mapped or self._edge_is_mapped\n    changed = edge0 is None or face0 is None or self._edge_is_mapped != edge0 or (self._face_is_mapped != face0)\n    return mapped or changed",
    "docstring": "Determine whether edges and/or faces are color-mapped. This is a helper for update_scalarmappable. It sets Boolean flags '_edge_is_mapped' and '_face_is_mapped'. Returns ------- mapping_change : bool True if either flag is True, or if a flag has changed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_set_mappable_flags arg:self arguments arg Assign Assign Assign Assign If Compare If Call Assign If Call Assign If Compare Assign Assign BoolOp Assign BoolOp Compare Compare Compare Compare Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "_preload_cuda_deps",
    "source_code": "def _preload_cuda_deps(lib_folder: str, lib_name: str) -> None:\n    assert platform.system() == 'Linux', 'Should only be called on Linux'\n    lib_path = None\n    for path in sys.path:\n        candidate_lib_paths = _get_cuda_dep_paths(path, lib_folder, lib_name)\n        if candidate_lib_paths:\n            lib_path = candidate_lib_paths[0]\n            break\n    if not lib_path:\n        raise ValueError(f'{lib_name} not found in the system path {sys.path}')\n    ctypes.CDLL(lib_path)",
    "docstring": "Preloads cuda deps if they could not be found otherwise.",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_preload_cuda_deps arg:lib_folder arg:lib_name arguments arg arg Compare Call Assign For Assign Call If Assign If Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "summon_full_params",
    "source_code": "@staticmethod\n@contextlib.contextmanager\ndef summon_full_params(module: nn.Module, recurse: bool=True, writeback: bool=True, rank0_only: bool=False, offload_to_cpu: bool=False, with_grads: bool=False) -> Generator:\n    with _unshard_params(module, recurse, writeback, rank0_only, offload_to_cpu, with_grads):\n        yield",
    "docstring": "Expose full params for FSDP instances with this context manager. Can be useful *after* forward/backward for a model to get the params for additional processing or checking. It can take a non-FSDP module and will summon full params for all contained FSDP modules as well as their children, depending on the ``)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:summon_full_params arg:module arg:recurse arg:writeback arg:rank0_only arg:offload_to_cpu arg:with_grads arguments arg arg arg arg arg arg With Call"
  },
  {
    "library": "scipy",
    "name": "MatVarReader",
    "source_code": "class MatVarReader:\n\n    def __init__(self, file_reader):\n        pass\n\n    def read_header(self):\n        pass\n\n    def array_from_header(self, header):\n        pass",
    "docstring": "Abstract class defining required interface for var readers",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "ClassDef name:MatVarReader FunctionDef name:__init__ arg:self arg:file_reader arguments arg arg FunctionDef name:read_header arg:self arguments arg FunctionDef name:array_from_header arg:self arg:header arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_free_energy",
    "source_code": "def _free_energy(self, v):\n    return -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_).sum(axis=1)",
    "docstring": "Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : ndarray of shape (n_samples,) The value of the free energy.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:_free_energy arg:self arg:v arguments arg arg Return return:yes Call Call Call Call"
  },
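A hedged numeric restatement of the free-energy formula with dense NumPy arrays in place of `safe_sparse_dot` (`np.logaddexp(0, x)` computes `log(1 + e**x)` stably):

```python
import numpy as np

rng = np.random.default_rng(0)
v = rng.normal(size=(5, 8))    # (n_samples, n_features) visible units
W = rng.normal(size=(4, 8))    # (n_components, n_features) weights
b_h = rng.normal(size=4)       # hidden biases
b_v = rng.normal(size=8)       # visible biases

# F(v) = -v.b_v - sum_j log(1 + exp(v.W_j + b_h_j))
free_energy = -v @ b_v - np.logaddexp(0, v @ W.T + b_h).sum(axis=1)
assert free_energy.shape == (5,)
```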
  {
    "library": "kornia",
    "name": "MultiprocessWrapper",
    "source_code": "class MultiprocessWrapper:\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        args = tuple((arg.clone() if isinstance(arg, torch.Tensor) else arg for arg in args))\n        kwargs = {key: val.clone() if isinstance(val, torch.Tensor) else val for key, val in kwargs.items()}\n        super().__init__(*args, **kwargs)",
    "docstring": "When used as a base class, makes the class work with the 'spawn' multiprocessing context.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "ClassDef name:MultiprocessWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_detect_per_channel_helper",
    "source_code": "def _detect_per_channel_helper(self, model: nn.Module):\n    per_channel_info: dict = {}\n    for fqn, module in model.named_modules():\n        is_in_include_list = any((isinstance(module, x) for x in self.supported_modules))\n        per_channel_supported = False\n        if is_in_include_list:\n            per_channel_supported = True\n            q_config_file = module.qconfig\n            assert isinstance(q_config_file, QConfig)\n            q_or_s_obj = module.qconfig.weight.p.func()\n            assert isinstance(q_or_s_obj, (FakeQuantize, ObserverBase))\n            per_channel_used = False\n            if hasattr(q_or_s_obj, 'ch_axis'):\n                if isinstance(q_or_s_obj, FakeQuantize):\n                    if hasattr(q_or_s_obj, 'is_per_channel') and q_or_s_obj.is_per_channel:\n                        per_channel_used = True\n                elif isinstance(q_or_s_obj, ObserverBase):\n                    per_channel_used = True\n                else:\n                    raise ValueError('Should be either observer or fake quant')\n            per_channel_info[fqn] = {self.PER_CHAN_SUPPORTED_KEY: per_channel_supported, self.PER_CHAN_USED_KEY: per_channel_used, self.BACKEND_KEY: self.backend_chosen}\n    return per_channel_info",
    "docstring": "determines if per_channel quantization is supported in modules and submodules. Returns a dictionary in the higher level _detect_per_channel function. Each entry maps the fully-qualified-name to information on whether per_channel quantization. Args: model: The current module that is being checked to see if it is per_channel quantizable Returns dictionary mapping fqns to if per_channel quantization is possible",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_detect_per_channel_helper arg:self arg:model arguments arg arg For Call Assign Call Call Assign If Assign Assign Call Assign Call Call Assign If Call If Call If BoolOp Call Assign If Call Assign Raise Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_logger_dict",
    "source_code": "def get_logger_dict(mod: nn.Module, prefix: str='') -> dict[str, dict]:\n    torch._C._log_api_usage_once('quantization_api._numeric_suite.get_logger_dict')\n    target_dict: dict[str, dict] = {}\n    _get_logger_dict_helper(mod, target_dict, prefix)\n    return target_dict",
    "docstring": "Traverse the modules and save all logger stats into target dict. This is mainly used for quantization accuracy debug. Type of loggers supported: ShadowLogger: used to log the outputs of the quantized module and its matching float shadow module, OutputLogger: used to log the outputs of the modules Args: mod: module we want to save all logger stats prefix: prefix for the current module Return: target_dict: the dictionary used to save all logger stats",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite.py",
    "ast_data": "FunctionDef name:get_logger_dict arg:mod arg:prefix arguments arg arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "average_name",
    "source_code": "@doc_controls.do_not_generate_docs\ndef average_name(self, var):\n    if var.ref() in self._averages:\n        return self._averages[var.ref()].name[:-len(':0')]\n    return ops.get_default_graph().unique_name(var.name[:-len(':0')] + '/' + self.name, mark_as_used=False)",
    "docstring": "[Meant for TF1] Returns name of holding the average for . (Designed to work with legacy , it is sensitive to specific variable names and not recommended for TF2) The typical scenario for is to compute moving averages of variables during training, and restore the variables from the computed moving averages during evaluations. To restore variables, you have to know the name of the shadow variables. That name and the original variable can then be passed to a object to restore the variable from the moving average value with: can be called whether or not has been called. Args: var: A object. Returns: A string: The name of the variable that will be used or was used by the to hold the moving average of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\moving_averages.py",
    "ast_data": "FunctionDef name:average_name arg:self arg:var arguments arg arg If Compare Call Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "staff_member_required",
    "source_code": "def staff_member_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='admin:login'):\n    actual_decorator = user_passes_test(lambda u: u.is_active and u.is_staff, login_url=login_url, redirect_field_name=redirect_field_name)\n    if view_func:\n        return actual_decorator(view_func)\n    return actual_decorator",
    "docstring": "Decorator for views that checks that the user is logged in and is a staff member, redirecting to the login page if necessary.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\views\\decorators.py",
    "ast_data": "FunctionDef name:staff_member_required arg:view_func arg:redirect_field_name arg:login_url arguments arg arg arg Assign Call arguments arg BoolOp If Return return:yes Call Return return:yes"
  },
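Typical usage inside a configured Django project; with arguments the decorator must be called, while bare `@staff_member_required` also works because `view_func` defaults to None:

```python
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse

@staff_member_required(login_url="login")
def reports(request):
    return HttpResponse("staff only")
```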
  {
    "library": "django",
    "name": "root_attributes",
    "source_code": "def root_attributes(self):\n    return {}",
    "docstring": "Return extra attributes to place on the root (i.e. feed/channel) element. Called from write().",
    "type": "method",
    "file_path": "django\\django\\utils\\feedgenerator.py",
    "ast_data": "FunctionDef name:root_attributes arg:self arguments arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "set_fontsize",
    "source_code": "def set_fontsize(self, s=None):\n    s = mpl._val_or_rc(s, 'legend.fontsize')\n    self.prop = FontProperties(size=s)\n    self.stale = True",
    "docstring": "Set the fontsize in points. If *s* is not given, reset to :rc:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_fontsize arg:self arg:s arguments arg arg Assign Call Assign Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_check_parameters",
    "source_code": "def _check_parameters(self, X):\n    self._check_weights_parameters()\n    self._check_means_parameters(X)\n    self._check_precision_parameters(X)\n    self._checkcovariance_prior_parameter(X)",
    "docstring": "Check that the parameters are well defined. Parameters ---------- X : array-like of shape (n_samples, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_check_parameters arg:self arg:X arguments arg arg Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "get_audiences",
    "source_code": "def get_audiences(self, client, user, scope) -> Union[str, list[str]]:\n    return client.get_client_id()",
    "docstring": "Return the audience for the token. By default this simply returns the client ID. Developers MAY re-implement this method to add extra audiences:: def get_audiences(self, client, user, scope): return [ client.get_client_id(), resource_server.get_id(), ]",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py",
    "ast_data": "FunctionDef name:get_audiences arg:self arg:client arg:user arg:scope arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "gen_all_reshape_possibilities",
    "source_code": "def gen_all_reshape_possibilities(list_of_dims, target):\n    all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims)\n    all_constraints = []\n    for p in all_possibilities:\n        to_multiply = []\n        p = list(p)\n        for constraint in p:\n            assert isinstance(constraint, BinConstraintD)\n            if constraint.op == op_neq:\n                to_multiply.append(constraint.lhs)\n        if not to_multiply:\n            all_constraints.append(Conj(p))\n        elif len(to_multiply) < len(list_of_dims):\n            all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))]))\n        else:\n            all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims), Prod(target), op_eq)]))\n    return Disj(all_constraints)",
    "docstring": "Consider all possibilities what the input dimensions could be (number or dynamic) Then generate the appropriate constraints using multiplication or mod depending on the possibility The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn for the input. Target is fixed because at most one dimension could be dyn. We have different cases for this. Args: list_of_dims: The input list of dimensions target: The tensor we want to reshape to Returns: A disjunction of transformed reshape constraints",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:gen_all_reshape_possibilities arg:list_of_dims arg:target arguments arg arg Assign Call Assign For Assign Assign Call For Call If Compare Call If Call Call If Compare Call Call Call Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "next_stage_indices",
    "source_code": "def next_stage_indices(count: int, next_actions: list[Optional[_Action]]) -> list[int]:\n    seen: set[int] = set()\n    ret: list[int] = []\n    for a in next_actions:\n        if a is not None and a.stage_index not in seen:\n            seen.add(a.stage_index)\n            ret.append(a.stage_index)\n            if len(ret) == count:\n                break\n    return ret",
    "docstring": "Remove duplicates (same stage, different microbatch), find next 'count' stages that will do compute.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:next_stage_indices arg:count arg:next_actions arguments arg arg Call For If BoolOp Compare Compare Call Call If Compare Call Return return:yes"
  },
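The dedup-and-take-first-N logic is easy to exercise standalone; this re-implements it with `SimpleNamespace` standing in for `_Action` (a hedged sketch, not the pipelining API):

```python
from types import SimpleNamespace

def next_stage_indices(count, next_actions):
    seen, ret = set(), []
    for a in next_actions:
        if a is not None and a.stage_index not in seen:
            seen.add(a.stage_index)
            ret.append(a.stage_index)
            if len(ret) == count:
                break
    return ret

acts = [SimpleNamespace(stage_index=i) for i in (2, 2, 0, 1, 0)]
acts.insert(1, None)                           # idle slots are skipped
assert next_stage_indices(2, acts) == [2, 0]   # duplicates collapse
```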
  {
    "library": "pandas",
    "name": "convert",
    "source_code": "def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str) -> tuple[Index, Index]:\n    assert isinstance(values, np.ndarray), type(values)\n    index = RangeIndex(len(values))\n    return (index, index)",
    "docstring": "Convert the data from this selection to the appropriate pandas type. Parameters ---------- values : np.ndarray nan_rep : str encoding : str errors : str",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:convert arg:self arg:values arg:nan_rep arg:encoding arg:errors arguments arg arg arg arg arg Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, major, minor, patch, identifier_string, version_type):\n    self.major = major\n    self.minor = minor\n    self.patch = patch\n    self.identifier_string = identifier_string\n    self.version_type = version_type\n    self._update_string()",
    "docstring": "Constructor. Args: major: major string eg. (1) minor: minor string eg. (3) patch: patch string eg. (1) identifier_string: extension string eg. (-rc0) version_type: version parameter ((SNAPSHOT|NIGHTLY)_VERSION)",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:major arg:minor arg:patch arg:identifier_string arg:version_type arguments arg arg arg arg arg arg Assign Assign Assign Assign Assign Call"
  },
  {
    "library": "scrapy",
    "name": "request",
    "source_code": "def request(self, method: bytes, uri: bytes, headers: TxHeaders | None=None, bodyProducer: IBodyProducer | None=None) -> Deferred[TxResponse]:\n    return self._requestWithEndpoint(key=('http-proxy', self._proxyURI.host, self._proxyURI.port), endpoint=self._getEndpoint(self._proxyURI), method=method, parsedURI=URI.fromBytes(uri), headers=headers, bodyProducer=bodyProducer, requestPath=uri)",
    "docstring": "Issue a new request via the configured proxy.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py",
    "ast_data": "FunctionDef name:request arg:self arg:method arg:uri arg:headers arg:bodyProducer arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "replace_gradient_components",
    "source_code": "def replace_gradient_components(self, value, component_grads):\n    return component_grads",
    "docstring": "Replaces the gradient components in with . The gradient of a ResourceVariable is either None or a Tensor. So we don't need 's TypeSpec or non-gradient components in this method. Args: value: A with its gradient components compatible with . component_grads: A or None as the gradient result. Returns: The , which is either a or None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:replace_gradient_components arg:self arg:value arg:component_grads arguments arg arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_base_actions",
    "source_code": "def _get_base_actions(self):\n    actions = []\n    base_actions = (self.get_action(action) for action in self.actions or [])\n    base_actions = [action for action in base_actions if action]\n    base_action_names = {name for _, name, _ in base_actions}\n    for name, func in self.admin_site.actions:\n        if name in base_action_names:\n            continue\n        description = self._get_action_description(func, name)\n        actions.append((func, name, description))\n    actions.extend(base_actions)\n    return actions",
    "docstring": "Return the list of actions, prior to any request-based filtering.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:_get_base_actions arg:self arguments arg Assign Assign Call BoolOp Assign Assign For If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ShuffleAndRepeatDataset",
    "source_code": "class _ShuffleAndRepeatDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, buffer_size, count=None, seed=None):\n        self._input_dataset = input_dataset\n        self._buffer_size = ops.convert_to_tensor(buffer_size, dtype=dtypes.int64, name='buffer_size')\n        if count is None:\n            self._count = constant_op.constant(-1, dtype=dtypes.int64, name='count')\n        else:\n            self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name='count')\n        self._seed, self._seed2 = random_seed.get_seed(seed)\n        variant_tensor = gen_dataset_ops.shuffle_and_repeat_dataset(self._input_dataset._variant_tensor, buffer_size=self._buffer_size, count=self._count, seed=self._seed, seed2=self._seed2, **self._flat_structure)\n        super(_ShuffleAndRepeatDataset, self).__init__(input_dataset, variant_tensor)",
    "docstring": "A that fuses and .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\shuffle_ops.py",
    "ast_data": "ClassDef name:_ShuffleAndRepeatDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:buffer_size arg:count arg:seed arguments arg arg arg arg arg Assign Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_create_chunk_dtensor",
    "source_code": "def _create_chunk_dtensor(tensor: torch.Tensor, rank: int, device_mesh: DeviceMesh) -> DTensor:\n    tensor = tensor.detach().clone()\n    replicate_placements = [Replicate() for _ in range(device_mesh.ndim)]\n    shard_placements = [Replicate() for _ in range(device_mesh.ndim)]\n    shard_placements[-1] = DShard(0)\n    return DTensor.from_local(tensor, device_mesh, replicate_placements, run_check=False).redistribute(placements=shard_placements)",
    "docstring": "Shard a tensor to chunks along the first dimension. The local rank will gets its corresponding chunk as the local tensor to create a DTensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_shard_utils.py",
    "ast_data": "FunctionDef name:_create_chunk_dtensor arg:tensor arg:rank arg:device_mesh arguments arg arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "is_msys2",
    "source_code": "def is_msys2():\n    return 'MSYSTEM' in os.environ and re.match('MSYS|MINGW.*|CLANG.*|UCRT.*', os.environ['MSYSTEM'])",
    "docstring": "Return true if this in an MSYS2 build",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config.py",
    "ast_data": "FunctionDef name:is_msys2 arguments Return return:yes BoolOp Compare Call"
  },
  {
    "library": "kornia",
    "name": "get_transformation_matrix",
    "source_code": "def get_transformation_matrix(self, input: Tensor, params: Optional[List[ParamItem]]=None, recompute: bool=False, extra_args: Optional[Dict[str, Any]]=None) -> Tensor:\n    if params is None:\n        raise NotImplementedError('requires params to be provided.')\n    named_modules: Iterator[Tuple[str, Module]] = self.get_forward_sequence(params)\n    res_mat: Tensor = self.identity_matrix(_transform_input(input))\n    for (_, module), param in zip(named_modules, params if params is not None else []):\n        module = cast(OperationBase, module)\n        if isinstance(module.op, (K.GeometricAugmentationBase2D,)) and isinstance(param.data, dict):\n            ori_shape = input.shape\n            input = module.op.transform_tensor(input)\n            if recompute:\n                flags = override_parameters(module.op.flags, extra_args, in_place=False)\n                mat = module.op.generate_transformation_matrix(input, param.data, flags)\n            elif module.op._transform_matrix is not None:\n                mat = as_tensor(module.transform_matrix, device=input.device, dtype=input.dtype)\n            else:\n                raise RuntimeError(f'{module}.transform_matrix is None while `recompute=False`.')\n            res_mat = mat @ res_mat\n            input = module.op.transform_output_tensor(input, ori_shape)\n            if module.op.keepdim and ori_shape != input.shape:\n                res_mat = res_mat.squeeze()\n    return res_mat",
    "docstring": "Compute the transformation matrix according to the provided parameters. Args: input: the input tensor. params: params for the sequence. recompute: if to recompute the transformation matrix according to the params. default: False. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\policy.py",
    "ast_data": "FunctionDef name:get_transformation_matrix arg:self arg:input arg:params arg:recompute arg:extra_args arguments arg arg arg arg arg If Compare Raise Call Call Call Call For Call Compare Assign Call If BoolOp Call Call Assign Assign Call If Assign Call Assign Call If Compare Assign Call Raise Call Assign Assign Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "LayoutEngine",
    "source_code": "class LayoutEngine:\n    _adjust_compatible = None\n    _colorbar_gridspec = None\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self._params = {}\n\n    def set(self, **kwargs):\n        raise NotImplementedError\n\n    @property\n    def colorbar_gridspec(self):\n        if self._colorbar_gridspec is None:\n            raise NotImplementedError\n        return self._colorbar_gridspec\n\n    @property\n    def adjust_compatible(self):\n        if self._adjust_compatible is None:\n            raise NotImplementedError\n        return self._adjust_compatible\n\n    def get(self):\n        return dict(self._params)\n\n    def execute(self, fig):\n        raise NotImplementedError",
    "docstring": "Base class for Matplotlib layout engines. A layout engine can be passed to a figure at instantiation or at any time with . Once attached to a figure, the layout engine `~.figure.Figure.draw~.figure.Figure.set_layout_engineLayoutEngine.Figure.colorbar.colorbar.make_axes_gridspec.colorbar.make_axes.Figure.subplots_adjustLayoutEngineLayoutEngine.setLayoutEngine.execute` with your implementation",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py",
    "ast_data": "ClassDef name:LayoutEngine Assign Assign FunctionDef name:__init__ arg:self arguments arg arg Call Call Assign FunctionDef name:set arg:self arguments arg arg Raise FunctionDef name:colorbar_gridspec arg:self arguments arg If Compare Raise Return return:yes FunctionDef name:adjust_compatible arg:self arguments arg If Compare Raise Return return:yes FunctionDef name:get arg:self arguments arg Return return:yes Call FunctionDef name:execute arg:self arg:fig arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_save_ddp_bucket_info",
    "source_code": "def _save_ddp_bucket_info(bucket: dist.GradBucket, zero: ZeroRedundancyOptimizer):\n    overlap_info = zero._overlap_info\n    bucket_params = bucket.parameters()\n    assert len(bucket_params) > 0, 'Empty bucket'\n    overlap_info.params_per_bucket.append(bucket_params)\n    if overlap_info.shard_buckets:\n        bucket_size = 0\n        for param in bucket_params:\n            bucket_size += param.numel()\n        assert overlap_info.total_size is not None\n        overlap_info.total_size += bucket_size",
    "docstring": "Save :class: gradient bucket information for :class: instance `ZeroRedundancyOptimizer` instance.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py",
    "ast_data": "FunctionDef name:_save_ddp_bucket_info arg:bucket arg:zero arguments arg arg Assign Assign Call Compare Call Call If Assign For Call Compare"
  },
  {
    "library": "sphinx",
    "name": "DefaultRole",
    "source_code": "class DefaultRole(SphinxDirective):\n    optional_arguments = 1\n    final_argument_whitespace = False\n\n    def run(self) -> list[Node]:\n        if not self.arguments:\n            docutils.unregister_role('')\n            return []\n        role_name = self.arguments[0]\n        role, messages = roles.role(role_name, self.state_machine.language, self.lineno, self.state.reporter)\n        if role:\n            docutils.register_role('', role)\n            self.env.current_document.default_role = role_name\n        else:\n            literal_block = nodes.literal_block(self.block_text, self.block_text)\n            reporter = self.state.reporter\n            error = reporter.error('Unknown interpreted text role \"%s\".' % role_name, literal_block, line=self.lineno)\n            messages += [error]\n        return cast('list[nodes.Node]', messages)",
    "docstring": "Set the default interpreted text role. Overridden from docutils.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "ClassDef name:DefaultRole Assign Assign FunctionDef name:run arg:self arguments arg If Call Return return:no Assign Assign Call If Call Assign Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_values_for_argsort",
    "source_code": "def _values_for_argsort(self) -> np.ndarray:\n    return np.array(self)",
    "docstring": "Return values for sorting. Returns ------- ndarray The transformed values should maintain the ordering between values within the array. See Also -------- ExtensionArray.argsort : Return the indices that would sort this array. Notes ----- The caller is responsible for *not* modifying these values in-place, so it is safe for implementers to give views on ``: >>> arr = pd.array([1, 2, 3]) >>> arr._values_for_argsort() array([1, 2, 3])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_values_for_argsort arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_input_at",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_input_at(self, node_index):\n    return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')",
    "docstring": "Retrieves the input tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_input_at arg:self arg:node_index arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getmembers",
    "source_code": "def getmembers(object, predicate=None):\n    return _inspect.getmembers(object, predicate)",
    "docstring": "TFDecorator-aware replacement for inspect.getmembers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getmembers arg:object arg:predicate arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "symptoms",
    "source_code": "def symptoms(self):\n    return self._get_tpu_property('symptoms')",
    "docstring": "Return Cloud TPU Symptoms of the TPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:symptoms arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "def handle(self, op, args, kwargs):\n    if any((isinstance(x, keras_tensor.KerasTensor) for x in nest.flatten([args, kwargs]))):\n        return TFOpLambda(op)(*args, **kwargs)\n    else:\n        return self.NOT_SUPPORTED",
    "docstring": "Handle the specified operation with the specified arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "FunctionDef name:handle arg:self arg:op arg:args arg:kwargs arguments arg arg arg arg If Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_allgather_state_info",
    "source_code": "def _allgather_state_info(fsdp_state: _FSDPState, input_states: dict[str, Any]) -> list[dict[str, StateInfo]]:\n    processed_state_dict: dict[str, StateInfo] = {}\n    gathered_state_info: list[dict[str, StateInfo]] = [{} for _ in range(fsdp_state.world_size)]\n    for fqn, optim_state in input_states.items():\n        processed_state = StateInfo({}, {}, {})\n        for state_name, value in sorted_items(optim_state):\n            if torch.is_tensor(value):\n                if value.dim() == 0:\n                    processed_state.scalar_tensors[state_name] = value.cpu()\n                else:\n                    processed_state.tensors[state_name] = _PosDimTensorInfo(value.shape, value.dtype)\n            else:\n                processed_state.non_tensors[state_name] = value\n        processed_state_dict[fqn] = processed_state\n    dist.all_gather_object(gathered_state_info, processed_state_dict, group=fsdp_state.process_group)\n    return gathered_state_info",
    "docstring": "Given the ``, allgather StateInfo for each state. The function uses all_gather_object to gather StateInfo so no GPU tensors are sent.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "FunctionDef name:_allgather_state_info arg:fsdp_state arg:input_states arguments arg arg Call For Call Assign Call For Call If Call If Compare Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "copy",
    "source_code": "def copy(self, file, mode: str='w', propindexes: bool=True, keys=None, complib=None, complevel: int | None=None, fletcher32: bool=False, overwrite: bool=True) -> HDFStore:\n    new_store = HDFStore(file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32)\n    if keys is None:\n        keys = list(self.keys())\n    if not isinstance(keys, (tuple, list)):\n        keys = [keys]\n    for k in keys:\n        s = self.get_storer(k)\n        if s is not None:\n            if k in new_store:\n                if overwrite:\n                    new_store.remove(k)\n            data = self.select(k)\n            if isinstance(s, Table):\n                index: bool | list[str] = False\n                if propindexes:\n                    index = [a.name for a in s.axes if a.is_indexed]\n                new_store.append(k, data, index=index, data_columns=getattr(s, 'data_columns', None), encoding=s.encoding)\n            else:\n                new_store.put(k, data, encoding=s.encoding)\n    return new_store",
    "docstring": "Copy the existing store to a new file, updating in place. Parameters ---------- propindexes : bool, default True Restore indexes in copied file. keys : list, optional List of keys to include in the copy (defaults to all). overwrite : bool, default True Whether to overwrite (remove and replace) existing nodes in the new store. mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:copy arg:self arg:file arg:mode arg:propindexes arg:keys arg:complib arg:complevel arg:fletcher32 arg:overwrite arguments arg arg arg arg arg arg arg arg arg Assign Call If Compare Assign Call Call If Call Assign For Assign Call If Compare If Compare If Call Assign Call If Call If Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "promo_mode_enum_to_string",
    "source_code": "def promo_mode_enum_to_string(promo_safety_mode_enum) -> str:\n    if promo_safety_mode_enum == PromoMode.OFF:\n        return 'off'\n    if promo_safety_mode_enum == PromoMode.LEGACY:\n        return 'legacy'\n    elif promo_safety_mode_enum == PromoMode.SAFE:\n        return 'safe'\n    elif promo_safety_mode_enum == PromoMode.ALL:\n        return 'all'\n    else:\n        raise ValueError(f'The provided promotion mode {promo_safety_mode_enum} does not exist.')",
    "docstring": "Returns the corresponding PromoMode string value from PromoMode enum.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:promo_mode_enum_to_string arg:promo_safety_mode_enum arguments arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "load_iris",
    "source_code": "@validate_params({'return_X_y': ['boolean'], 'as_frame': ['boolean']}, prefer_skip_nested_validation=True)\ndef load_iris(*, return_X_y=False, as_frame=False):\n    data_file_name = 'iris.csv'\n    data, target, target_names, fdescr = load_csv_data(data_file_name=data_file_name, descr_file_name='iris.rst')\n    feature_names = ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']\n    frame = None\n    target_columns = ['target']\n    if as_frame:\n        frame, data, target = _convert_data_dataframe('load_iris', data, target, feature_names, target_columns)\n    if return_X_y:\n        return (data, target)\n    return Bunch(data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE)",
    "docstring": "Load and return the iris dataset (classification). The iris dataset is a classic and very easy multi-class classification dataset. ================= ============== Classes 3 Samples per class 50 Samples total 150 Dimensionality 4 Features real, positive ================= ============== Read more in the :ref:. .. versionchanged:: 0.20 Fixed two wrong data points according to Fisher's paper. The new version is the same as in R, but not as in the UCI Machine Learning Repository. Parameters ---------- return_X_y : bool, default=False If True, returns `datatargetreturn_X_ydatatarget~sklearn.utils.Bunchas_frame=Truedataas_frame=Truetargetas_frame=Truedatatargetsphx_glr_auto_examples_decomposition_plot_pca_iris.py` for a more detailed example of how to work with the iris dataset.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:load_iris arguments arg arg Assign Assign Call Assign Assign Assign If Assign Call If Return return:yes Return return:yes Call Call"
  },
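Standard usage of the public API:

```python
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
assert X.shape == (150, 4) and y.shape == (150,)

iris = load_iris(as_frame=True)   # Bunch whose .frame is a pandas DataFrame
print(iris.frame.head())          # 4 feature columns plus 'target'
```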
  {
    "library": "pytorch",
    "name": "get_node_submodule_map",
    "source_code": "def get_node_submodule_map(self) -> dict[str, str]:\n    return self._node_submodule_map",
    "docstring": "Returns a map from node name to submodule name, e.g. node: main_module_impl_impl_over_arch_unary_multiple_embedding _pooling_embedding_pooling_sparse_entity_equivalence_key _proxy_embedding_bag maps to submodule name of: _run_on_acc_1",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:get_node_submodule_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, node_def, op, message, *args):\n    super(InternalError, self).__init__(node_def, op, message, INTERNAL, *args)",
    "docstring": "Creates an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_threshold_mgc_map",
    "source_code": "def _threshold_mgc_map(stat_mgc_map, samp_size):\n    m, n = stat_mgc_map.shape\n    per_sig = 1 - 0.02 / samp_size\n    threshold = samp_size * (samp_size - 3) / 4 - 1 / 2\n    threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1\n    threshold = max(threshold, stat_mgc_map[m - 1][n - 1])\n    sig_connect = stat_mgc_map > threshold\n    if np.sum(sig_connect) > 0:\n        sig_connect, _ = _measurements.label(sig_connect)\n        _, label_counts = np.unique(sig_connect, return_counts=True)\n        max_label = np.argmax(label_counts[1:]) + 1\n        sig_connect = sig_connect == max_label\n    else:\n        sig_connect = np.array([[False]])\n    return sig_connect",
    "docstring": "Finds a connected region of significance in the MGC-map by thresholding. Parameters ---------- stat_mgc_map : ndarray All local correlations within ``. samp_size : int The sample size of original data. Returns ------- sig_connect : ndarray A binary matrix with 1's indicating the significant region.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mgc.py",
    "ast_data": "FunctionDef name:_threshold_mgc_map arg:stat_mgc_map arg:samp_size arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Compare If Compare Call Assign Call Assign Call Assign Call Assign Compare Assign Call Return return:yes"
  },
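The connected-region step in isolation, using the public `scipy.ndimage.label` (the threshold value here is arbitrary, not the beta-quantile one computed above):

```python
import numpy as np
from scipy import ndimage

stat_map = np.array([[0.9, 0.8, 0.0],
                     [0.7, 0.0, 0.0],
                     [0.0, 0.0, 0.6]])
sig = stat_map > 0.5                    # threshold the MGC-map
labels, _ = ndimage.label(sig)          # label connected components
_, counts = np.unique(labels, return_counts=True)
largest = np.argmax(counts[1:]) + 1     # skip the background label 0
sig = labels == largest
assert sig.sum() == 3                   # only the largest region survives
```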
  {
    "library": "tensorflow",
    "name": "reopen",
    "source_code": "def reopen(self):\n    if self._closed:\n        self._initialize()\n        self._closed = False",
    "docstring": "Reopens the EventFileWriter. Can be called after to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py",
    "ast_data": "FunctionDef name:reopen arg:self arguments arg If Call Assign"
  },
  {
    "library": "scipy",
    "name": "set_smoothing_factor",
    "source_code": "def set_smoothing_factor(self, s):\n    data = self._data\n    if data[6] == -1:\n        warnings.warn('smoothing factor unchanged forLSQ spline with fixed knots', stacklevel=2)\n        return\n    args = data[:6] + (s,) + data[7:]\n    with FITPACK_LOCK:\n        data = dfitpack.fpcurf1(*args)\n    if data[-1] == 1:\n        data = self._reset_nest(data)\n    self._data = data\n    self._reset_class()",
    "docstring": "Continue spline computation with the given smoothing factor s and with the knots found at the last call. This routine modifies the spline in place.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:set_smoothing_factor arg:self arg:s arguments arg arg Assign If Compare Call Return return:no Assign With Assign Call If Compare Assign Call Assign Call"
  },
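Typical public-API usage: fit once, then cheaply refit in place with a new smoothing factor while reusing the knots already found:

```python
import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(0, 10, 50)
y = np.sin(x) + 0.1 * np.random.default_rng(0).normal(size=50)

spl = UnivariateSpline(x, y, s=1.0)  # initial smoothing fit
spl.set_smoothing_factor(0.5)        # modifies the spline in place
```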
  {
    "library": "django",
    "name": "get_table_description",
    "source_code": "def get_table_description(self, cursor, table_name):\n    cursor.execute(\"\\n            SELECT\\n                a.attname AS column_name,\\n                NOT (a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)) AS is_nullable,\\n                pg_get_expr(ad.adbin, ad.adrelid) AS column_default,\\n                CASE WHEN collname = 'default' THEN NULL ELSE collname END AS collation,\\n                a.attidentity != '' AS is_autofield,\\n                col_description(a.attrelid, a.attnum) AS column_comment\\n            FROM pg_attribute a\\n            LEFT JOIN pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum\\n            LEFT JOIN pg_collation co ON a.attcollation = co.oid\\n            JOIN pg_type t ON a.atttypid = t.oid\\n            JOIN pg_class c ON a.attrelid = c.oid\\n            JOIN pg_namespace n ON c.relnamespace = n.oid\\n            WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')\\n                AND c.relname = %s\\n                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')\\n                AND pg_catalog.pg_table_is_visible(c.oid)\\n        \", [table_name])\n    field_map = {line[0]: line[1:] for line in cursor.fetchall()}\n    cursor.execute('SELECT * FROM %s LIMIT 1' % self.connection.ops.quote_name(table_name))\n    return [FieldInfo(line.name, line.type_code, line.internal_size if line.display_size is None else line.display_size, line.internal_size, line.precision, line.scale, *field_map[line.name]) for line in cursor.description]",
    "docstring": "Return a description of the table with the DB-API cursor.description interface.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\introspection.py",
    "ast_data": "FunctionDef name:get_table_description arg:self arg:cursor arg:table_name arguments arg arg arg Call Assign Call Call Call Return return:yes Call Compare"
  },
  {
    "library": "django",
    "name": "non_form_errors",
    "source_code": "def non_form_errors(self):\n    if self._non_form_errors is None:\n        self.full_clean()\n    return self._non_form_errors",
    "docstring": "Return an ErrorList of errors that aren't associated with a particular form -- i.e., from formset.clean(). Return an empty ErrorList if there are none.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:non_form_errors arg:self arguments arg If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_backend",
    "source_code": "def get_backend(self) -> str:\n    return self._backend_name",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:get_backend arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "check_and_add_duplicate_pattern",
    "source_code": "def check_and_add_duplicate_pattern(pattern: PatternExpr, graph: Optional[torch.fx.Graph], seen_patterns: dict[str, list[Optional[str]]], skip_duplicates: bool=False) -> bool:\n    pattern_repr = PatternPrettyPrinter.run(pattern)\n    equiv_pattern_reprs = seen_patterns.get(pattern_repr)\n    if not equiv_pattern_reprs:\n        seen_patterns[pattern_repr].append(str(graph) if graph else None)\n        return False\n    if graph is None:\n        if skip_duplicates:\n            return True\n        torch._check(False, lambda: f'Duplicate pattern: {pattern_repr} with no graph')\n    new_graph_str = str(graph)\n    for graph_str in equiv_pattern_reprs:\n        if not new_graph_str == graph_str:\n            continue\n        if skip_duplicates:\n            return True\n        torch._check(False, lambda: f'Duplicate pattern: {pattern_repr} with duplicated match graph {graph_str} ')\n    equiv_pattern_reprs.append(new_graph_str)\n    return False",
    "docstring": "Check if a pattern is a duplicate. Because we ignore certain types in searching, but not in matching, use the graph to distinguish equivalent search patterns. Returns True if a duplicate is found and is passed in. Errors if is False and a duplicate is found.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:check_and_add_duplicate_pattern arg:pattern arg:graph arg:seen_patterns arg:skip_duplicates arguments arg arg arg arg Assign Call Assign Call If Call Call Return return:yes If Compare If Return return:yes Call arguments Assign Call For If Compare If Return return:yes Call arguments Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save_counter",
    "source_code": "@property\ndef save_counter(self):\n    self._maybe_create_save_counter()\n    return self._save_counter",
    "docstring": "An integer variable which starts at zero and is incremented on save. Used to number checkpoints. Returns: The save counter variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:save_counter arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_rename_parameter",
    "source_code": "def _rename_parameter(old_name, new_name, dep_version=None):\n\n    def decorator(fun):\n\n        @functools.wraps(fun)\n        def wrapper(*args, **kwargs):\n            if old_name in kwargs:\n                if dep_version:\n                    end_version = dep_version.split('.')\n                    end_version[1] = str(int(end_version[1]) + 2)\n                    end_version = '.'.join(end_version)\n                    message = f'Use of keyword argument `{old_name}` is deprecated and replaced by `{new_name}`.  Support for `{old_name}` will be removed in SciPy {end_version}.'\n                    warnings.warn(message, DeprecationWarning, stacklevel=2)\n                if new_name in kwargs:\n                    message = f'{fun.__name__}() got multiple values for argument now known as `{new_name}`'\n                    raise TypeError(message)\n                kwargs[new_name] = kwargs.pop(old_name)\n            return fun(*args, **kwargs)\n        return wrapper\n    return decorator",
    "docstring": "Generate decorator for backward-compatible keyword renaming. Apply the decorator generated by to functions with a recently renamed parameter to maintain backward-compatibility. After decoration, the function behaves as follows: If only the new parameter is passed into the function, behave as usual. If only the old parameter is passed into the function (as a keyword), raise a DeprecationWarning if is provided, and behave as usual otherwise. If both old and new parameters are passed into the function, raise a DeprecationWarning if is provided, and raise the appropriate TypeError (function got multiple values for argument). Parameters ---------- old_name : str Old name of parameter new_name : str New name of parameter dep_version : str, optional Version of SciPy in which old parameter was deprecated in the format 'X.Y.Z'. If supplied, the deprecation message will indicate that support for the old parameter will be removed in version 'X.Y+2.Z' Notes ----- Untested with functions that accept *args. Probably won't work as written.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_rename_parameter arg:old_name arg:new_name arg:dep_version arguments arg arg arg FunctionDef name:decorator arg:fun arguments arg FunctionDef name:wrapper arguments arg arg If Compare If Assign Call Assign Call Call Assign Call Assign Call If Compare Assign Raise Call Assign Call Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_local_fwd_input",
    "source_code": "def set_local_fwd_input(self, prev_stage_outputs: Any, mb_index: int) -> None:\n    recv_infos: tuple[InputInfo, ...] = self.args_recv_info[mb_index]\n    prev_stage_outputs = _normalize_model_output_as_tuple(prev_stage_outputs)\n    for info, tensor in zip(recv_infos, prev_stage_outputs):\n        assert isinstance(tensor, torch.Tensor), f'expected tensor values as outputs from prev stage, got {type(tensor)}'\n        assert isinstance(info, _RecvInfo), 'set_local_Fwd_input should only be called on non-first stage, which should always have RecvInfo'\n        info.buffer = tensor.detach().requires_grad_(True)",
    "docstring": "Moves 'prev_stage_outputs' from another stage on the same rank into place as inputs for this stage. Avoids copying tensor data or using send/recv op. Detaches original tensor and sets requires_grad so the tensor can serve as a leaf for autograd and gradients can be collected from it during backward.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:set_local_fwd_input arg:self arg:prev_stage_outputs arg:mb_index arguments arg arg arg Assign Call For Call Call Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self) -> None:\n    torch._C._mps_waitForEvent(self.__eventId)",
    "docstring": "Makes all future work submitted to the default stream wait for this event.",
    "type": "method",
    "file_path": "pytorch\\torch\\mps\\event.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, o):\n    if not self.available(o):\n        raise ValueError('already locked')\n    self._owner = o",
    "docstring": "Reserve the lock for *o*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:o arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "django",
    "name": "get_del_batches",
    "source_code": "def get_del_batches(self, objs, fields):\n    conn_batch_size = max(connections[self.using].ops.bulk_batch_size(fields, objs), 1)\n    if len(objs) > conn_batch_size:\n        return [objs[i:i + conn_batch_size] for i in range(0, len(objs), conn_batch_size)]\n    else:\n        return [objs]",
    "docstring": "Return the objs in suitably sized batches for the used connection.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\deletion.py",
    "ast_data": "FunctionDef name:get_del_batches arg:self arg:objs arg:fields arguments arg arg arg Assign Call Call If Compare Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_matrix",
    "source_code": "def get_matrix(self):\n    if self._invalid:\n        self._inverted = None\n        self._invalid = 0\n    return self._mtx",
    "docstring": "Get the underlying transformation matrix as a 3x3 array:: a c e b d f 0 0 1 .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:get_matrix arg:self arguments arg If Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_indexer_non_comparable",
    "source_code": "@final\ndef _get_indexer_non_comparable(self, target: Index, method, unique: bool=True) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n    if method is not None:\n        other_dtype = _unpack_nested_dtype(target)\n        raise TypeError(f'Cannot compare dtypes {self.dtype} and {other_dtype}')\n    no_matches = -1 * np.ones(target.shape, dtype=np.intp)\n    if unique:\n        return no_matches\n    else:\n        missing = np.arange(len(target), dtype=np.intp)\n        return (no_matches, missing)",
    "docstring": "Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_get_indexer_non_comparable arg:self arg:target arg:method arg:unique arguments arg arg arg arg If Compare Assign Call Raise Call Assign Call If Return return:yes Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "trace_function",
    "source_code": "def trace_function(args=None, kwargs=None, tracing_options=None):\n    if not tracing_options:\n        tracing_options = TracingOptions()\n    args = args if args else ()\n    kwargs = kwargs if kwargs else {}\n    if tracing_options.input_signature and (args or kwargs):\n        bound_args = function_type_utils.bind_function_inputs(args, kwargs, tracing_options.polymorphic_type, tracing_options.default_values)\n        args, kwargs = (bound_args.args, bound_args.kwargs)\n    with tracing_options.lock or contextlib.nullcontext():\n        if tracing_options.input_signature and (not args) and (not kwargs):\n            args = tracing_options.input_signature\n            kwargs = {}\n        concrete_function = _maybe_define_function(args, kwargs, tracing_options)\n    if not tracing_options.bind_graph_to_function:\n        concrete_function._garbage_collector.release()\n    return concrete_function",
    "docstring": "Returns a specialized to inputs and execution context. Compiles a Graph corresponding to the Python function logic and uses that to generate a differentiable ConcreteFunction. Args: args: inputs to specialize on. Can be concrete values (e.g. 1) or or . kwargs: keyword inputs to specialize on. Concrete values (e.g. 1) or or . tracing_options: TracingOptions for the tracing process.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\tracing_compilation.py",
    "ast_data": "FunctionDef name:trace_function arg:args arg:kwargs arg:tracing_options arguments arg arg arg If Assign Call Assign Assign If BoolOp BoolOp Assign Call Assign With BoolOp Call If BoolOp Assign Assign Assign Call If Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, key):\n    for d in reversed(self.dicts):\n        if key in d:\n            return d[key]\n    raise KeyError(key)",
    "docstring": "Get a variable's value, starting at the current context and going upward",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:key arguments arg arg For Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, activation: torch.Tensor) -> torch.FloatTensor:\n    assert activation.dim() == 2\n    weight_rows = self.weight_transposed.size()[0]\n    weight_cols = self.weight_transposed.size()[1]\n    decomposed_weight: npt.NDArray = np.empty(shape=(weight_rows, weight_cols), dtype=object)\n    for row in range(weight_rows):\n        for col in range(weight_cols):\n            decomposed_weight[row][col] = self.decompose_APoT(bin(self.weight_transposed[row][col]))\n    result = self.matmul(decomposed_weight, activation).type(torch.FloatTensor)\n    return result",
    "docstring": "Multiply APoT quantized weight and uniformly quantized activation (dtype: quint8) with bitshifting instead of matrix multiplication. Result has dtype torch.float32 Args: activation (Tensor): uniformly quantized activation tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\linear.py",
    "ast_data": "FunctionDef name:forward arg:self arg:activation arguments arg arg Compare Call Assign Call Assign Call Call For Call For Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "display",
    "source_code": "@property\ndef display(self) -> DisplayConfig:\n    return self._display",
    "docstring": "Dictionary of parameters for rich display in Jupyter notebook. Valid parameters: - format (\"png\" or \"svg\"): Image format to produce - scaling (float): Relative scaling of embedded image - hidpi (bool): When True, double the DPI while preserving the size",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:display arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "extract_patches",
    "source_code": "def extract_patches(self, input: Tensor, grid_size: Optional[Tuple[int, int]]=None, pad: Optional[Tuple[int, int, int, int]]=None) -> Tensor:\n    if pad is not None:\n        input = fpad(input, list(pad))\n    if grid_size is None:\n        grid_size = self.grid_size\n    window_size = (input.size(-2) // grid_size[-2], input.size(-1) // grid_size[-1])\n    stride = window_size\n    return extract_tensor_patches(input, window_size, stride)",
    "docstring": "Extract patches from tensor. Example: >>> import kornia.augmentation as K >>> pas = PatchSequential(K.ColorJiggle(0.1, 0.1, 0.1, 0.1, p=1.0), patchwise_apply=False) >>> pas.extract_patches(torch.arange(16).view(1, 1, 4, 4), grid_size=(2, 2)) tensor([[[[[ 0, 1], [ 4, 5]]], [[[ 2, 3], [ 6, 7]]], [[[ 8, 9], [12, 13]]], [[[10, 11], [14, 15]]]]]) >>> pas.extract_patches(torch.arange(54).view(1, 1, 6, 9), grid_size=(2, 2), pad=(-1, -1, -2, -2)) tensor([[[[[19, 20, 21]]], [[[22, 23, 24]]], [[[28, 29, 30]]], [[[31, 32, 33]]]]])",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\patch.py",
    "ast_data": "FunctionDef name:extract_patches arg:self arg:input arg:grid_size arg:pad arguments arg arg arg arg If Compare Assign Call Call If Compare Assign Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "tree_structure",
    "source_code": "def tree_structure(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> TreeSpec:\n    return tree_flatten(tree, is_leaf=is_leaf)[1]",
    "docstring": "Get the TreeSpec for a pytree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:tree_structure arg:tree arg:is_leaf arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "embedding_lookup",
    "source_code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Any:\n    return cpu_embedding_lookup(features, weights, self.embedding_tables, self._feature_config)",
    "docstring": "Apply standard lookup ops on CPU. Args: features: A nested structure of s, s or s, with the same structure as . Inputs will be downcast to . Only one type out of or is supported per call. weights: If not , a nested structure of s, s or s, matching the above, except that the tensors should be of float type (and they will be downcast to ). For s we assume the are the same for the parallel entries from and similarly for s we assume the row_splits are the same. Returns: A nested structure of Tensors with the same structure as input features.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:embedding_lookup arg:self arg:features arg:weights arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_unexposed_collectives",
    "source_code": "def _get_unexposed_collectives(graph: torch.fx.Graph) -> list[torch.fx.Node]:\n\n    def _is_compute_intensive(node: torch.fx.Node) -> bool:\n        return node.target in [torch.ops.aten.mm.default]\n    collective_to_overlapping_candidates = defaultdict(list)\n    available_nodes = OrderedSet[torch.fx.Node]()\n    collective_to_overlappable_nodes = _get_collective_to_overlappable_nodes(graph)\n    for collective, overlappable_nodes in collective_to_overlappable_nodes.items():\n        candidates = [x for x in overlappable_nodes if _is_compute_intensive(x)]\n        collective_to_overlapping_candidates[collective] = candidates\n        available_nodes.update(candidates)\n    unexposed_collectives = []\n    for collective, overlapping_candidates in collective_to_overlapping_candidates.items():\n        for x in overlapping_candidates:\n            if x in available_nodes:\n                unexposed_collectives.append(collective)\n                available_nodes.remove(x)\n                break\n    return unexposed_collectives",
    "docstring": "Find all unexposed collectives in the graph. Because we don't have the runtime estimate, this function is a rough estimation using the following strong/hand-wavy assumptions: - Only a predefined set of \"compute intensive\" operation can hide a collective. - Any \"compute intensive\" operation can hide exactly one collective.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:_get_unexposed_collectives arg:graph arguments arg FunctionDef name:_is_compute_intensive arg:node arguments arg Return return:yes Compare Assign Call Assign Call Assign Call For Call Assign Call Assign Call Assign For Call For If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(self) -> None:\n    super().synchronize()",
    "docstring": "Wait for all the kernels in this stream to complete. .. note:: This is a wrapper around `CUDA Stream documentation`_ for more info.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:synchronize arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "UsageData",
    "source_code": "@dataclasses.dataclass\nclass UsageData:\n    cpu_percent: float\n    memory_percent: float\n    processes: list[dict[str, Any]]\n    gpu_list: list[GpuData]",
    "docstring": "Dataclass for storing usage data. This is the data that will be logged to the usage_log file.",
    "type": "class",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "ClassDef name:UsageData"
  },
  {
    "library": "pytorch",
    "name": "replace_pattern",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef replace_pattern(gm: GraphModule, pattern: Union[Callable, GraphModule], replacement: Union[Callable, GraphModule]) -> list[Match]:\n    match_and_replacements = _replace_pattern(gm, pattern, replacement)\n    return [Match(anchor=m.anchor, nodes_map=m.nodes_map) for m in match_and_replacements]",
    "docstring": "Matches all possible non-overlapping sets of operators and their data dependencies (``, the generated Python code looks like this: .. code-block:: python def forward(self, x, w1, w2): stack_1 = torch.stack([w1, w2]) sum_1 = stack_1.sum() stack_2 = torch.stack([w1, w2]) sum_2 = stack_2.sum() max_1 = torch.max(sum_1) add_1 = x + max_1 max_2 = torch.max(sum_2) add_2 = add_1 + max_2 return add_2",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\subgraph_rewriter.py",
    "ast_data": "FunctionDef name:replace_pattern arg:gm arg:pattern arg:replacement arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_mi_cd",
    "source_code": "def _compute_mi_cd(c, d, n_neighbors):\n    n_samples = c.shape[0]\n    c = c.reshape((-1, 1))\n    radius = np.empty(n_samples)\n    label_counts = np.empty(n_samples)\n    k_all = np.empty(n_samples)\n    nn = NearestNeighbors()\n    for label in np.unique(d):\n        mask = d == label\n        count = np.sum(mask)\n        if count > 1:\n            k = min(n_neighbors, count - 1)\n            nn.set_params(n_neighbors=k)\n            nn.fit(c[mask])\n            r = nn.kneighbors()[0]\n            radius[mask] = np.nextafter(r[:, -1], 0)\n            k_all[mask] = k\n        label_counts[mask] = count\n    mask = label_counts > 1\n    n_samples = np.sum(mask)\n    label_counts = label_counts[mask]\n    k_all = k_all[mask]\n    c = c[mask]\n    radius = radius[mask]\n    kd = KDTree(c)\n    m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)\n    m_all = np.array(m_all)\n    mi = digamma(n_samples) + np.mean(digamma(k_all)) - np.mean(digamma(label_counts)) - np.mean(digamma(m_all))\n    return max(0, mi)",
    "docstring": "Compute mutual information between continuous and discrete variables. Parameters ---------- c : ndarray, shape (n_samples,) Samples of a continuous random variable. d : ndarray, shape (n_samples,) Samples of a discrete random variable. n_neighbors : int Number of nearest neighbors to search for each point, see [1]_. Returns ------- mi : float Estimated mutual information in nat units. If it turned out to be negative it is replaced by 0. Notes ----- True mutual information can't be negative. If its estimate by a numerical method is negative, it means (providing the method is adequate) that the mutual information is close to 0 and replacing it by 0 is a reasonable strategy. References ---------- .. [1] B. C. Ross \"Mutual Information between Discrete and Continuous Data Sets\". PLoS ONE 9(2), 2014.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_mutual_info.py",
    "ast_data": "FunctionDef name:_compute_mi_cd arg:c arg:d arg:n_neighbors arguments arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call For Call Assign Compare Assign Call If Compare Assign Call Call Call Assign Call Assign Call Assign Assign Assign Compare Assign Call Assign Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_elements",
    "source_code": "def _num_elements(self) -> NoReturn:\n    raise NotImplementedError()",
    "docstring": "Number of elements of this Tensor. Unlike regular Tensors, the number of elements is always known for EagerTensors. This is more performant than tensor.shape.num_elements Returns: Long - num elements in the tensor",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_num_elements arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "auto_contrast",
    "source_code": "def auto_contrast(probability: float, _: int) -> OperationBase:\n    return AutoContrast(probability)",
    "docstring": "Return AutoConstrast op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\autoaugment\\ops.py",
    "ast_data": "FunctionDef name:auto_contrast arg:probability arg:_ arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_broadcast_concatenate",
    "source_code": "def _broadcast_concatenate(x, y, axis):\n    x = np.moveaxis(x, axis, -1)\n    y = np.moveaxis(y, axis, -1)\n    z = np.broadcast(x[..., 0], y[..., 0])\n    x = np.broadcast_to(x, z.shape + (x.shape[-1],))\n    y = np.broadcast_to(y, z.shape + (y.shape[-1],))\n    z = np.concatenate((x, y), axis=-1)\n    return (x, y, z)",
    "docstring": "Broadcast then concatenate arrays, leaving concatenation axis last",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mannwhitneyu.py",
    "ast_data": "FunctionDef name:_broadcast_concatenate arg:x arg:y arg:axis arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_inverse_paths_linestyles",
    "source_code": "def _get_inverse_paths_linestyles(self):\n    path_patterns = [(mpath.Path(np.full((1, 2), np.nan)), ls) if ls == (0, None) else (path, mlines._get_inverse_dash_pattern(*ls)) for path, ls in zip(self._paths, itertools.cycle(self._linestyles))]\n    return zip(*path_patterns)",
    "docstring": "Returns the path and pattern for the gaps in the non-solid lines. This path and pattern is the inverse of the path and pattern used to construct the non-solid lines. For solid lines, we set the inverse path to nans to prevent drawing an inverse line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_get_inverse_paths_linestyles arg:self arguments arg Assign Compare Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "register_grant",
    "source_code": "def register_grant(self, grant_cls, extensions=None):\n    if hasattr(grant_cls, 'check_authorization_endpoint'):\n        self._authorization_grants.append((grant_cls, extensions))\n    if hasattr(grant_cls, 'check_token_endpoint'):\n        self._token_grants.append((grant_cls, extensions))",
    "docstring": "Register a grant class into the endpoint registry. Developers can implement the grants in `` and register with this method:: class AuthorizationCodeGrant(grants.AuthorizationCodeGrant): def authenticate_user(self, credential): # ... authorization_server.register_grant(AuthorizationCodeGrant) :param grant_cls: a grant class. :param extensions: extensions for the grant class.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:register_grant arg:self arg:grant_cls arg:extensions arguments arg arg arg If Call Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "_low_contention_reduce_scatter",
    "source_code": "@torch.library.impl(lib, '_low_contention_reduce_scatter', 'CUDA')\ndef _low_contention_reduce_scatter(tensor: torch.Tensor, reduce_op: str, group_name: str) -> torch.Tensor:\n    symm_mem = rendezvous(tensor, group_name)\n    if symm_mem is not None:\n        return _low_contention_reduce_scatter_with_symm_mem_input(tensor, reduce_op, symm_mem)\n    else:\n        workspace = get_symm_mem_workspace(group_name, tensor.numel() * tensor.element_size())\n        return _low_contention_reduce_scatter_with_workspace(tensor, reduce_op, workspace)",
    "docstring": "Performs reduce-scatter with symmetric memory in a low-contention fashion. This implementation performs a P2P-based all-to-all followed by an offline reduction. When is already in symmetric memory: - Pull-based all-to-all is used. - No symmetric memory workspace is required. When is not in symmetric memory: - Push-based all-to-all is used. - Symmetric memory workspace size requirement: the size of . SM-usage: - SM-based copy of the rank's own chunk for the all-to-all. - Reduction on the all-to-all result. TODO(yifu): the SM-based copy can be avoided with a list-based reduction kernel.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:_low_contention_reduce_scatter arg:tensor arg:reduce_op arg:group_name arguments arg arg arg Assign Call If Compare Return return:yes Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "ax",
    "source_code": "@property\ndef ax(self):\n    if self.axes.shape == (1, 1):\n        return self.axes[0, 0]\n    else:\n        err = 'Use the `.axes` attribute when facet variables are assigned.'\n        raise AttributeError(err)",
    "docstring": "The :class: when no faceting variables are assigned.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:ax arg:self arguments arg If Compare Return return:yes Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_update_process_group",
    "source_code": "def _update_process_group(self, new_process_group):\n    self._has_rebuilt_buckets = False\n    self.reducer._reset_state()\n    if not _rank_not_in_group(new_process_group):\n        self.process_group = new_process_group\n        self.reducer._update_process_group(new_process_group)",
    "docstring": "Dynamically updates the process group for DDP so that we can shrink/expand DDP world size without having to reinitialize DDP. NOTE: If you are using custom communications hooks via, register_comm_hook, you need to update the process groups for those hooks separately.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:_update_process_group arg:self arg:new_process_group arguments arg arg Assign Call If Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_ticks_position",
    "source_code": "def set_ticks_position(self, position):\n    if position == 'right':\n        self.set_tick_params(which='both', right=True, labelright=True, left=False, labelleft=False)\n        self.set_offset_position(position)\n    elif position == 'left':\n        self.set_tick_params(which='both', right=False, labelright=False, left=True, labelleft=True)\n        self.set_offset_position(position)\n    elif position == 'both':\n        self.set_tick_params(which='both', right=True, left=True)\n    elif position == 'none':\n        self.set_tick_params(which='both', right=False, left=False)\n    elif position == 'default':\n        self.set_tick_params(which='both', right=True, labelright=False, left=True, labelleft=True)\n    else:\n        _api.check_in_list(['left', 'right', 'both', 'default', 'none'], position=position)\n    self.stale = True",
    "docstring": "Set the ticks position. Parameters ---------- position : {'left', 'right', 'both', 'default', 'none'} 'both' sets the ticks to appear on both positions, but does not change the tick labels. 'default' resets the tick positions to the default: ticks on both positions, labels at left. 'none' can be used if you don't want any ticks. 'none' and 'both' affect only the ticks, not the labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_ticks_position arg:self arg:position arguments arg arg If Compare Call Call If Compare Call Call If Compare Call If Compare Call If Compare Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "linear_refinement_rule",
    "source_code": "@register_refinement_rule(torch.nn.Linear)\ndef linear_refinement_rule(n: Node):\n    res = []\n    assert isinstance(n.args[0], Node)\n    arg_type = n.args[0].type\n    if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType):\n        res = [Equality(arg_type.__args__[0], n.type.__args__[0])]\n    return res",
    "docstring": "The equality constraints are between the first dimension of the input and output",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:linear_refinement_rule arg:n arguments arg Assign Call Assign If BoolOp Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "logpdf",
    "source_code": "def logpdf(self, x, mu=None, kappa=1):\n    dim, mu, kappa = self._process_parameters(mu, kappa)\n    return self._logpdf(x, dim, mu, kappa)",
    "docstring": "Log of the von Mises-Fisher probability density function. Parameters ---------- x : array_like Points at which to evaluate the log of the probability density function. The last axis of must correspond to unit vectors of the same dimensionality as the distribution. mu : array_like, default: None Mean direction of the distribution. Must be a one-dimensional unit vector of norm 1. kappa : float, default: 1 Concentration parameter. Must be positive. Returns ------- logpdf : ndarray or scalar Log of the probability density function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:logpdf arg:self arg:x arg:mu arg:kappa arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "_setup_workers",
    "source_code": "def _setup_workers(self, num_workers):\n    self.pool = []\n    for _ in range(num_workers):\n        self.pool.append(Thread(target=self.threadloop))\n    for a_thread in self.pool:\n        a_thread.setDaemon(True)\n        a_thread.start()",
    "docstring": "Sets up the worker threads NOTE: undefined behaviour if you call this again.",
    "type": "method",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:_setup_workers arg:self arg:num_workers arguments arg arg Assign For Call Call Call For Call Call"
  },
  {
    "library": "django",
    "name": "_batched_insert",
    "source_code": "def _batched_insert(self, objs, fields, batch_size, on_conflict=None, update_fields=None, unique_fields=None):\n    connection = connections[self.db]\n    ops = connection.ops\n    max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)\n    batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size\n    inserted_rows = []\n    bulk_return = connection.features.can_return_rows_from_bulk_insert\n    for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:\n        if bulk_return and (on_conflict is None or on_conflict == OnConflict.UPDATE):\n            inserted_rows.extend(self._insert(item, fields=fields, using=self.db, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, returning_fields=self.model._meta.db_returning_fields))\n        else:\n            self._insert(item, fields=fields, using=self.db, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields)\n    return inserted_rows",
    "docstring": "Helper method for bulk_create() to insert objs one batch at a time.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_batched_insert arg:self arg:objs arg:fields arg:batch_size arg:on_conflict arg:update_fields arg:unique_fields arguments arg arg arg arg arg arg arg Assign Assign Assign Call Call Assign Call Assign Assign For Call Call If BoolOp BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SingleThreadedFlatMapDataset",
    "source_code": "class SingleThreadedFlatMapDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, map_func):\n        self._input_dataset = input_dataset\n        self._map_func = structured_function.StructuredFunctionWrapper(map_func, self._transformation_name(), dataset=input_dataset, defun_kwargs={'_executor': 'SINGLE_THREADED_EXECUTOR'})\n        self._structure = self._map_func.output_structure._element_spec\n        variant_tensor = gen_dataset_ops.flat_map_dataset(input_dataset._variant_tensor, self._map_func.function.captured_inputs, f=self._map_func.function, **self._flat_structure)\n        super(SingleThreadedFlatMapDataset, self).__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._map_func]\n\n    @property\n    def element_spec(self):\n        return self._structure\n\n    def _transformation_name(self):\n        return 'SingleThreadedFlatMapDataset'",
    "docstring": "A that maps a function over its input and flattens the result.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\from_tensor_slices_benchmark.py",
    "ast_data": "ClassDef name:SingleThreadedFlatMapDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:map_func arguments arg arg arg Assign Assign Call Call Assign Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:element_spec arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_detector_name",
    "source_code": "def get_detector_name(self) -> str:\n    return 'per_channel_detector'",
    "docstring": "returns the string name of this detector",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_detector_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_eval_using_default_session",
    "source_code": "def _eval_using_default_session(tensors, feed_dict, graph, session=None):\n    if session is None:\n        session = stack.get_default_session()\n        if session is None:\n            raise ValueError('Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`')\n        if session.graph is not graph:\n            raise ValueError(\"Cannot use the default session to evaluate tensor: the tensor's graph is different from the session's graph. Pass an explicit session to `eval(session=sess)`.\")\n    elif session.graph is not graph:\n        raise ValueError(\"Cannot use the given session to evaluate tensor: the tensor's graph is different from the session's graph.\")\n    return session.run(tensors, feed_dict)",
    "docstring": "Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate \"tensors\". Returns: Either a single numpy ndarray if \"tensors\" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in \"tensors\". Raises: ValueError: If no default session is available; the default session does not have \"graph\" as its graph; or if \"session\" is specified, and it does not have \"graph\" as its graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:_eval_using_default_session arg:tensors arg:feed_dict arg:graph arg:session arguments arg arg arg arg If Compare Assign Call If Compare Raise Call If Compare Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_fill_order",
    "source_code": "def get_fill_order(self) -> Optional[list[int]]:\n    if isinstance(self.layout, FlexibleLayout):\n        (index_vars, reduction_vars), _ = dependencies.index_vars_squeeze(self.data.get_pointwise_size(), self.data.get_reduction_size())\n        reads = self.get_read_writes().reads\n        assert all((isinstance(r, (dependencies.StarDep, dependencies.MemoryDep)) for r in reads))\n        reads = [sympy_subs(r.index, {v: sympy.S.Zero for v in reduction_vars if v != 0}) for r in reads if isinstance(r, dependencies.MemoryDep)]\n        if reads:\n            if isinstance(self.data, (Scan, Sort)):\n                indices = self.data.reindex(index_vars, reduction_vars)\n            else:\n                indices = index_vars\n            stride_lengths = [V.graph.sizevars.stride_hints(expr, indices) for expr in reads]\n            from .scheduler import pick_loop_order\n            return pick_loop_order(stride_lengths, self.get_size())\n    return None",
    "docstring": "If our layout is still flexible, try to determine the stride order based on stride orders of reads. TODO(jansel): A better algorithm here would look at downstream consumers of this value and try to do global graph-level layout optimization. This is also something just begging to be autotuned.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:get_fill_order arg:self arguments arg If Call Assign Call Call Call Assign Call Call Call Assign Call Compare Call If If Call Assign Call Assign Assign Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, loss_scale='auto'):\n    super(PolicyV1, self).__init__(name)\n    if loss_scale == 'auto':\n        loss_scale = 'dynamic' if name == 'mixed_float16' else None\n        self._using_default_loss_scale = True\n    else:\n        self._using_default_loss_scale = False\n    if loss_scale and self._compute_dtype not in (None, 'float16'):\n        tf_logging.warning('Creating a Policy with a loss scale is only useful for float16 policies. You passed loss_scale=%r for policy %s. Consider not passing any loss_scale instead.' % (loss_scale, name))\n    self._loss_scale = keras_loss_scale_module.get(loss_scale)",
    "docstring": "Constructs the policy. The argument determines the compute and variable dtype, the default loss scale, and has no additional effect on the Policy. The compute and variable dtypes can only be specified through , and cannot be specified directly. Args: name: A string. Can be one of the following values: * Any dtype name, such as 'float32' or 'float64'. Both the variable and compute dtypes will be that dtype. * 'mixed_float16' or 'mixed_bfloat16': The compute dtype is float16 or bfloat16, while the variable dtype is float32. With 'mixed_float16', a dynamic loss scale is used. These policies are used for mixed precision training. loss_scale: A , an int (which uses a ), the string \"dynamic\" (which uses a ), or None (which uses no loss scale). Defaults to . In the case: 1) if is , then use . 2) otherwise, do not use a loss scale. Only s, not layers, use the loss scale, and it is only used during , , and other similar methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:loss_scale arguments arg arg arg Call Call If Compare Assign Compare Assign Assign If BoolOp Compare Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "regex_replace",
    "source_code": "@tf_export('strings.regex_replace', v1=['strings.regex_replace', 'regex_replace'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('regex_replace')\ndef regex_replace(input, pattern, rewrite, replace_global=True, name=None):\n    if isinstance(pattern, util_compat.bytes_or_text_types) and isinstance(rewrite, util_compat.bytes_or_text_types):\n        return gen_string_ops.static_regex_replace(input=input, pattern=pattern, rewrite=rewrite, replace_global=replace_global, name=name)\n    return gen_string_ops.regex_replace(input=input, pattern=pattern, rewrite=rewrite, replace_global=replace_global, name=name)",
    "docstring": "Replace elements of matching regex with . >>> tf.strings.regex_replace(\"Text with tags.contains html\", ... \"]+>\", \" \") Args: input: string , the source strings to process. pattern: string or scalar string , regular expression to use, see more details at rewrite: string or scalar string , value to use in match replacement, supports backslash-escaped digits (\\1 to \\9) can be to insert text matching corresponding parenthesized group. replace_global: , if replace all non-overlapping matches, else replace only the first match. name: A name for the operation (optional). Returns: string of the same shape as with specified replacements.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\string_ops.py",
    "ast_data": "FunctionDef name:regex_replace arg:input arg:pattern arg:rewrite arg:replace_global arg:name arguments arg arg arg arg arg If BoolOp Call Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_lazy_and",
    "source_code": "def _tf_lazy_and(cond, b):\n    return tf_cond.cond(cond, b, lambda: cond)",
    "docstring": "Lazy-eval equivalent of \"and\" for Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:_tf_lazy_and arg:cond arg:b arguments arg arg Return return:yes Call arguments"
  },
  {
    "library": "cryptography",
    "name": "_escape_dn_value",
    "source_code": "def _escape_dn_value(val: str | bytes) -> str:\n    if not val:\n        return ''\n    if isinstance(val, bytes):\n        return '#' + binascii.hexlify(val).decode('utf8')\n    val = val.replace('\\\\', '\\\\\\\\')\n    val = val.replace('\"', '\\\\\"')\n    val = val.replace('+', '\\\\+')\n    val = val.replace(',', '\\\\,')\n    val = val.replace(';', '\\\\;')\n    val = val.replace('<', '\\\\<')\n    val = val.replace('>', '\\\\>')\n    val = val.replace('\\x00', '\\\\00')\n    if val[0] in ('#', ' '):\n        val = '\\\\' + val\n    if val[-1] == ' ':\n        val = val[:-1] + '\\\\ '\n    return val",
    "docstring": "Escape special characters in RFC4514 Distinguished Name value.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\x509\\name.py",
    "ast_data": "FunctionDef name:_escape_dn_value arg:val arguments arg If Return return:yes If Call Return return:yes Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return self.header() + '\\n            <p>\\n            And this is the amazing second page!\\n            </p>\\n        ' + self.footer()",
    "docstring": "Produce HTTP response body of another page app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut05_derived_objects.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_initialized",
    "source_code": "def is_initialized():\n    return _initialized and (not _is_in_bad_fork())",
    "docstring": "Return whether PyTorch's CUDA state has been initialized.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:is_initialized arguments Return return:yes BoolOp Call"
  },
  {
    "library": "django",
    "name": "sanitize_separators",
    "source_code": "def sanitize_separators(value):\n    if isinstance(value, str):\n        parts = []\n        decimal_separator = get_format('DECIMAL_SEPARATOR')\n        if decimal_separator in value:\n            value, decimals = value.split(decimal_separator, 1)\n            parts.append(decimals)\n        if settings.USE_THOUSAND_SEPARATOR:\n            thousand_sep = get_format('THOUSAND_SEPARATOR')\n            if thousand_sep == '.' and value.count('.') == 1 and (len(value.split('.')[-1]) != 3):\n                pass\n            else:\n                for replacement in {thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}:\n                    value = value.replace(replacement, '')\n        parts.append(value)\n        value = '.'.join(reversed(parts))\n    return value",
    "docstring": "Sanitize a value according to the current decimal and thousand separator setting. Used with form field input.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:sanitize_separators arg:value arguments arg If Call Assign Assign Call If Compare Assign Call Call If Assign Call If BoolOp Compare Compare Call Compare Call Call For Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "gca",
    "source_code": "def gca(self):\n    ax = self._axstack.current()\n    return ax if ax is not None else self.add_subplot()",
    "docstring": "Get the current Axes. If there is currently no Axes on this Figure, a new one is created using . (To test whether there is currently an Axes on a Figure, check whether `.pyplot.get_fignums()` is empty.)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:gca arg:self arguments arg Assign Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "register_extension",
    "source_code": "def register_extension(op_type: type[Any], extension_handler: type[ExtensionHandler]):\n    assert issubclass(extension_handler, ExtensionHandler), f'Expected ExtensionHandler, got {extension_handler}.'\n    assert op_type not in _serialization_registry, f'{op_type} is already registered.'\n    assert isinstance(op_type, type)\n    assert not (op_type.__module__.startswith('torch') or op_type.__module__.startswith('builtins'))\n    assert extension_handler.namespace() not in _deserialization_registry\n    _serialization_registry[op_type] = extension_handler\n    _deserialization_registry[extension_handler.namespace()] = extension_handler",
    "docstring": "Register custom de/serialization method for a node with non-standard type.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\serde\\serialize.py",
    "ast_data": "FunctionDef name:register_extension arg:op_type arg:extension_handler arguments arg arg Call Compare Call BoolOp Call Call Compare Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_launch_backward",
    "source_code": "def _launch_backward(self, test_case, iters, print_per_iter=False):\n    test_case.run_forward(num_runs=1, print_per_iter=False, cuda_sync=False)\n    test_case._output_mean()\n    backward_time = timeit.timeit(functools.partial(test_case.run_backward, iters, print_per_iter), number=1)\n    return backward_time",
    "docstring": "This function runs forward path of an op to get an output. Then the backward path is executed and the execution time is reported",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_core.py",
    "ast_data": "FunctionDef name:_launch_backward arg:self arg:test_case arg:iters arg:print_per_iter arguments arg arg arg arg Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "redact",
    "source_code": "def redact(self, needle: str, replace: str='<REDACTED>') -> None:\n    if needle == '':\n        return\n    self.redactions[needle] = replace",
    "docstring": "Redact specific strings; e.g., authorization tokens. This won't retroactively redact stuff you've already leaked, so make sure you redact things as soon as possible.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:redact arg:self arg:needle arg:replace arguments arg arg arg If Compare Return return:no Assign"
  },
  {
    "library": "tensorflow",
    "name": "saveable_object_to_restore_fn",
    "source_code": "def saveable_object_to_restore_fn(saveables):\n\n    def _restore_from_tensors(restored_tensors):\n        restore_ops = {}\n        for saveable in saveables:\n            saveable_restored_tensors = []\n            for spec in saveable.specs:\n                name = trackable_utils.extract_local_name(_convert_to_string(spec.name))\n                slice_spec = _convert_to_string(spec.slice_spec)\n                maybe_tensor = restored_tensors[name]\n                if not isinstance(maybe_tensor, dict):\n                    maybe_tensor = {'': maybe_tensor}\n                saveable_restored_tensors.append(maybe_tensor[slice_spec])\n            restore_ops[saveable.name] = saveable.restore(saveable_restored_tensors, restored_shapes=None)\n        return restore_ops\n    return _restore_from_tensors",
    "docstring": "Generates from SaveableObjects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:saveable_object_to_restore_fn arg:saveables arguments arg FunctionDef name:_restore_from_tensors arg:restored_tensors arguments arg Assign For Assign For Assign Call Call Assign Call Assign If Call Assign Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "load_all_fcompiler_classes",
    "source_code": "def load_all_fcompiler_classes():\n    from glob import glob\n    global fcompiler_class, fcompiler_aliases\n    if fcompiler_class is not None:\n        return\n    pys = os.path.join(os.path.dirname(__file__), '*.py')\n    fcompiler_class = {}\n    fcompiler_aliases = {}\n    for fname in glob(pys):\n        module_name, ext = os.path.splitext(os.path.basename(fname))\n        module_name = 'numpy.distutils.fcompiler.' + module_name\n        __import__(module_name)\n        module = sys.modules[module_name]\n        if hasattr(module, 'compilers'):\n            for cname in module.compilers:\n                klass = getattr(module, cname)\n                desc = (klass.compiler_type, klass, klass.description)\n                fcompiler_class[klass.compiler_type] = desc\n                for alias in klass.compiler_aliases:\n                    if alias in fcompiler_aliases:\n                        raise ValueError('alias %r defined for both %s and %s' % (alias, klass.__name__, fcompiler_aliases[alias][1].__name__))\n                    fcompiler_aliases[alias] = desc",
    "docstring": "Cache all the FCompiler classes found in modules in the numpy.distutils.fcompiler package.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:load_all_fcompiler_classes arguments If Compare Return return:no Assign Call Call Assign Assign For Call Assign Call Call Assign Call Assign If Call For Assign Call Assign Assign For If Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_extract_fake_inputs",
    "source_code": "def _extract_fake_inputs(gm, args, kwargs):\n    fake_inps: list[Any] = []\n    fake_vals: list[Any] = []\n    for node in gm.graph.nodes:\n        if node.op == 'placeholder':\n            fake_inps.append(node.meta.get('val'))\n        else:\n            fake_vals.append(node.meta.get('example_value'))\n    detected_fake_mode = detect_fake_mode(fake_inps + fake_vals)\n    detected_shape_env = detect_shape_env(fake_inps + fake_vals)\n    if detected_fake_mode:\n        if detected_shape_env:\n            assert detected_shape_env is detected_fake_mode.shape_env, \"Detected shape env does not match fake mode's shape env\"\n        fake_mode = detected_fake_mode\n    elif detected_shape_env:\n        fake_mode = FakeTensorMode(shape_env=detected_shape_env, export=True)\n    else:\n        fake_mode = FakeTensorMode(shape_env=ShapeEnv(), export=True)\n    count = 0\n\n    def lookup_fake(x):\n        nonlocal count\n        val = fake_inps[count] if isinstance(x, (int, torch.Tensor)) else x\n        count += 1\n        return val\n    fake_args = pytree.tree_map(lookup_fake, args)\n    fake_kwargs = pytree.tree_map(lookup_fake, kwargs)\n    return (fake_args, fake_kwargs, fake_mode)",
    "docstring": "Given a graph module, extract fakified input tensors from the metadata of its placeholders, and map them to the structure of given args and kwargs. Also return the fake mode used to fakify those inputs.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_extract_fake_inputs arg:gm arg:args arg:kwargs arguments arg arg arg For If Compare Call Call Call Call Assign Call Assign Call If If Compare Assign If Assign Call Assign Call Call Assign FunctionDef name:lookup_fake arg:x arguments arg Assign Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "encode",
    "source_code": "def encode(self, session_dict):\n    session_store_class = self.model.get_session_store_class()\n    return session_store_class().encode(session_dict)",
    "docstring": "Return the given session dictionary serialized and encoded as a string.",
    "type": "method",
    "file_path": "django\\django\\contrib\\sessions\\base_session.py",
    "ast_data": "FunctionDef name:encode arg:self arg:session_dict arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "nodata_value",
    "source_code": "@nodata_value.setter\ndef nodata_value(self, value):\n    if value is None:\n        capi.delete_band_nodata_value(self._ptr)\n    elif not isinstance(value, (int, float)):\n        raise ValueError('Nodata value must be numeric or None.')\n    else:\n        capi.set_band_nodata_value(self._ptr, value)\n    self._flush()",
    "docstring": "Set the nodata value for this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:nodata_value arg:self arg:value arguments arg arg If Compare Call If Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_duration_microseconds",
    "source_code": "def _get_duration_microseconds(start_time_seconds, end_time_seconds):\n    if end_time_seconds < start_time_seconds:\n        return 0\n    return round((end_time_seconds - start_time_seconds) * 1000000)",
    "docstring": "Calculate the duration between start and end time. Args: start_time_seconds: The start time in seconds. end_time_seconds: The end time in seconds. Returns: The duration between the start and the end time. Return 0 if end_time_seconds < start_time_seconds.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:_get_duration_microseconds arg:start_time_seconds arg:end_time_seconds arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "determine_observer_insert_points",
    "source_code": "def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> dict[str, dict[str, Any]]:\n    obs_ctr = ModelReportObserver\n    obs_fqn_to_info: dict[str, dict[str, Any]] = {}\n    for fqn, module in prepared_fx_model.named_modules():\n        if self._is_supported(module, insert=True):\n            targeted_node = self._get_targeting_node(prepared_fx_model, fqn)\n            pre_obs_fqn = fqn + '.' + self.DEFAULT_PRE_OBSERVER_NAME\n            obs_fqn_to_info[pre_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(ch_axis=self.ch_axis), DETECTOR_IS_POST_OBS_KEY: False, DETECTOR_OBS_ARGS_KEY: targeted_node.args}\n    return obs_fqn_to_info",
    "docstring": "Determines where observers need to be inserted for the Input Weight Equalization Detector. For this detector, we want to place observers in front of supported layers. Currently inserts observers for: linear layers conv layers Args: prepared_fx_model (GraphModule): The prepared Fx GraphModule Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: key \"target_node\" -> the node we are trying to observe with this observer (torch.fx.node.Node) key \"observer_to_insert\" -> the observer we wish to insert (ObserverBase) key \"is_post_observer\" -> True if this is meant to be a post-observer for target_node, False if pre-observer key \"observer_args\" -> The arguments that are meant to be passed into the observer",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:determine_observer_insert_points arg:self arg:prepared_fx_model arguments arg arg Assign For Call If Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, *args):\n    return self.opt",
    "docstring": "Return stored dict.",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_quadpack_py.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "friedmanchisquare",
    "source_code": "def friedmanchisquare(*args):\n    data = argstoarray(*args).astype(float)\n    k = len(data)\n    if k < 3:\n        raise ValueError(f'Less than 3 groups ({k}): the Friedman test is NOT appropriate.')\n    ranked = ma.masked_values(rankdata(data, axis=0), 0)\n    if ranked._mask is not nomask:\n        ranked = ma.mask_cols(ranked)\n        ranked = ranked.compressed().reshape(k, -1).view(ndarray)\n    else:\n        ranked = ranked._data\n    k, n = ranked.shape\n    repeats = [find_repeats(row) for row in ranked.T]\n    ties = np.array([y for x, y in repeats if x.size > 0])\n    tie_correction = 1 - (ties ** 3 - ties).sum() / float(n * (k ** 3 - k))\n    ssbg = np.sum((ranked.sum(-1) - n * (k + 1) / 2.0) ** 2)\n    chisq = ssbg * 12.0 / (n * k * (k + 1)) * 1.0 / tie_correction\n    return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))",
    "docstring": "Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. This function calculates the Friedman Chi-square test for repeated measures and returns the result, along with the associated probability value. Each input is considered a given group. Ideally, the number of treatments among each group should be equal. If this is not the case, only the first n treatments are taken into account, where n is the number of treatments of the smallest group. If a group has some missing values, the corresponding treatments are masked in the other groups. The test statistic is corrected for ties. Masked values in one group are propagated to the other groups. Returns ------- statistic : float the test statistic. pvalue : float the associated p-value.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:friedmanchisquare arguments arg Assign Call Call Assign Call If Compare Raise Call Assign Call Call If Compare Assign Call Assign Call Call Call Assign Assign Assign Call Assign Call Compare Assign Call Call Assign Call Call Assign Return return:yes Call Call"
  },
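The masked-statistics Friedman test above is exposed publicly as scipy.stats.mstats.friedmanchisquare. A minimal usage sketch, assuming scipy is installed; the data values are illustrative:

```python
# Three "treatments" measured on the same five subjects; the test needs
# at least three groups, as enforced in the source above.
import numpy as np
from scipy.stats import mstats

a = np.array([7.0, 9.9, 8.5, 5.1, 10.3])
b = np.array([5.3, 5.7, 4.7, 3.5, 7.7])
c = np.array([4.9, 7.6, 5.5, 2.8, 8.4])

res = mstats.friedmanchisquare(a, b, c)
print(res.statistic, res.pvalue)  # chi-square statistic and its p-value
```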
  {
    "library": "tensorflow",
    "name": "_make_assert_msg_data",
    "source_code": "def _make_assert_msg_data(sym, x, y, summarize, test_op):\n    data = []\n    data.append('Condition x %s y did not hold.' % sym)\n    if summarize > 0:\n        if x.shape == y.shape and x.shape.as_list():\n            mask = math_ops.logical_not(test_op)\n            indices = array_ops.where(mask)\n            indices_np = indices.numpy()\n            x_vals = array_ops.boolean_mask(x, mask)\n            y_vals = array_ops.boolean_mask(y, mask)\n            num_vals = min(summarize, indices_np.shape[0])\n            data.append('Indices of first %d different values:' % num_vals)\n            data.append(indices_np[:num_vals])\n            data.append('Corresponding x values:')\n            data.append(x_vals.numpy().reshape((-1,))[:num_vals])\n            data.append('Corresponding y values:')\n            data.append(y_vals.numpy().reshape((-1,))[:num_vals])\n        x_np = x.numpy().reshape((-1,))\n        y_np = y.numpy().reshape((-1,))\n        x_sum = min(x_np.size, summarize)\n        y_sum = min(y_np.size, summarize)\n        data.append('First %d elements of x:' % x_sum)\n        data.append(x_np[:x_sum])\n        data.append('First %d elements of y:' % y_sum)\n        data.append(y_np[:y_sum])\n    return data",
    "docstring": "Subroutine of _binary_assert that generates the components of the default error message when running in eager mode. Args: sym: Mathematical symbol for the test to apply to pairs of tensor elements, i.e. \"==\" x: First input to the assertion after applying y: Second input to the assertion summarize: Value of the \"summarize\" parameter to the original assert_* call; tells how many elements of each tensor to print. test_op: TensorFlow op that returns a Boolean tensor with True in each position where the assertion is satisfied. Returns: List of tensors and scalars that, when stringified and concatenated, will produce the error message string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_make_assert_msg_data arg:sym arg:x arg:y arg:summarize arg:test_op arguments arg arg arg arg arg Assign Call If Compare If BoolOp Compare Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Call Call Call Assign Call Call Assign Call Call Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_hostlist",
    "source_code": "def _resolve_hostlist(self):\n    return expand_hostlist(_get_slurm_var('STEP_NODELIST'))",
    "docstring": "Returns a list of hostnames for nodes running the current job step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:_resolve_hostlist arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "cast",
    "source_code": "@classmethod\ndef cast(cls, series, domain=None, window=None):\n    if domain is None:\n        domain = cls.domain\n    if window is None:\n        window = cls.window\n    return series.convert(domain, cls, window)",
    "docstring": "Convert series to series of this class. The is expected to be an instance of some polynomial series of one of the types supported by by the numpy.polynomial module, but could be some other class that supports the convert method. Parameters ---------- series : series The series instance to be converted. domain : {None, array_like}, optional If given, the array must be of the form `series` when evaluated. See Also -------- convert : similar instance method",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:cast arg:cls arg:series arg:domain arg:window arguments arg arg arg arg If Compare Assign If Compare Assign Return return:yes Call"
  },
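The cast classmethod is available on numpy's public polynomial classes; a short sketch converting a power-basis polynomial into the Chebyshev basis and checking that both representations agree:

```python
import numpy as np
from numpy.polynomial import Polynomial, Chebyshev

p = Polynomial([1.0, 2.0, 3.0])   # 1 + 2x + 3x^2 in the power basis
c = Chebyshev.cast(p)             # same function, Chebyshev coefficients

x = np.linspace(-1, 1, 5)
assert np.allclose(p(x), c(x))    # identical values, different basis
print(c.coef)
```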
  {
    "library": "pytorch",
    "name": "_populate_cpu_children",
    "source_code": "def _populate_cpu_children(self):\n    sync_events = [evt for evt in self if not evt.is_async and evt.device_type == DeviceType.CPU]\n    events = sorted(sync_events, key=attrgetter('thread'))\n    threads = itertools.groupby(events, key=lambda event: (event.thread, event.node_id))\n    for _thread_id, thread_events in threads:\n        thread_events_ = sorted(thread_events, key=lambda event: [event.time_range.start, -event.time_range.end])\n        current_events: list[FunctionEvent] = []\n        for event in thread_events_:\n            while len(current_events) > 0:\n                parent = current_events[-1]\n                if event.time_range.start >= parent.time_range.end or event.time_range.end > parent.time_range.end:\n                    current_events.pop()\n                else:\n                    parent.append_cpu_child(event)\n                    assert event.cpu_parent is None, f'There is already a CPU parent event for {event.key}'\n                    event.set_cpu_parent(parent)\n                    break\n            current_events.append(event)",
    "docstring": "Populate child events into each underlying FunctionEvent object. One event is a child of another if [s1, e1) is inside [s2, e2). Where s1 and e1 would be start and end of the child event's interval. And s2 and e2 start and end of the parent event's interval Example: In event list [[0, 10], [1, 3], [3, 4]] would have make [0, 10] be a parent of two other intervals. If for any reason two intervals intersect only partially, this function will not record a parent child relationship between then.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:_populate_cpu_children arg:self arguments arg Assign BoolOp Compare Assign Call Call Assign Call arguments arg For Assign Call arguments arg For While Compare Call Assign If BoolOp Compare Compare Call Call Compare Call Call"
  },
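A dependency-free sketch of the same stack-based nesting pass, using the containment rule from the docstring; the function name and tuple representation are illustrative, not PyTorch's API:

```python
def assign_parents(intervals):
    # Sort by start ascending, end descending so parents come before children.
    order = sorted(range(len(intervals)),
                   key=lambda i: (intervals[i][0], -intervals[i][1]))
    parent = [None] * len(intervals)
    stack = []  # indices of currently "open" candidate parents
    for i in order:
        s, e = intervals[i]
        while stack:
            ps, pe = intervals[stack[-1]]
            if s >= pe or e > pe:      # disjoint or partial overlap: discard
                stack.pop()
            else:
                parent[i] = stack[-1]  # fully nested inside the stack top
                break
        stack.append(i)
    return parent

# Matches the docstring example: [0, 10] parents the two smaller intervals.
print(assign_parents([(0, 10), (1, 3), (3, 4)]))  # -> [None, 0, 0]
```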
  {
    "library": "seaborn",
    "name": "_forward",
    "source_code": "def _forward(self, values):\n    return np.square(values)",
    "docstring": "Square native values to implement linear scaling of point area.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_forward arg:self arg:values arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SleepDataset",
    "source_code": "class _SleepDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, sleep_microseconds):\n        self._input_dataset = input_dataset\n        self._sleep_microseconds = sleep_microseconds\n        variant_tensor = gen_experimental_dataset_ops.sleep_dataset(self._input_dataset._variant_tensor, self._sleep_microseconds, **self._flat_structure)\n        super(_SleepDataset, self).__init__(input_dataset, variant_tensor)",
    "docstring": "A that sleeps before producing each upstream element.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\sleep.py",
    "ast_data": "ClassDef name:_SleepDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:sleep_microseconds arguments arg arg arg Assign Assign Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "time_query",
    "source_code": "def time_query(self, mnr, p, boxsize, leafsize):\n    self.T.query(self.queries, p=p)",
    "docstring": "Querying kd-tree dim | # points | # queries | KDTree | cKDTree | flat cKDTree",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_query arg:self arg:mnr arg:p arg:boxsize arg:leafsize arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "start",
    "source_code": "def start(self):\n    raise NotImplementedError('Please use create_server method to create aconcrete subclass of Server.')",
    "docstring": "Starts the RPC server on provided address. Server listens for new requests from client, once it is started.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "void_output",
    "source_code": "def void_output(func, argtypes, errcheck=True, cpl=False):\n    if argtypes:\n        func.argtypes = argtypes\n    if errcheck:\n        func.restype = c_int\n        func.errcheck = partial(check_errcode, cpl=cpl)\n    else:\n        func.restype = None\n    return func",
    "docstring": "For functions that don't only return an error code that needs to be examined.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py",
    "ast_data": "FunctionDef name:void_output arg:func arg:argtypes arg:errcheck arg:cpl arguments arg arg arg arg If Assign If Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "unsubscribe",
    "source_code": "def unsubscribe(self):\n    for channel in self.bus.listeners:\n        method = getattr(self, channel, None)\n        if method is not None:\n            self.bus.unsubscribe(channel, method)",
    "docstring": "Unregister this object as a listener on the bus.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:unsubscribe arg:self arguments arg For Assign Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "bias_addmm",
    "source_code": "def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1):\n    if inp.stride(0) == 0 or inp.size(0) == 1:\n        return torch.addmm(inp[0], mat1, mat2, out=out, alpha=alpha, beta=beta)\n    return torch.addmm(inp, mat1, mat2, out=out, alpha=alpha, beta=beta)",
    "docstring": "Giving torch.addmm a 1D tensor calls a different (faster) cublasLt kernel under the hood. There are a few shapes where this is slower, but they are rare.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\mm.py",
    "ast_data": "FunctionDef name:bias_addmm arg:inp arg:mat1 arg:mat2 arguments arg arg arg arg arg arg If BoolOp Compare Call Compare Call Return return:yes Call Return return:yes Call"
  },
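A sketch showing why the 1-D dispatch in bias_addmm is safe: when every bias row is the same (stride(0) == 0, as produced by expand), torch.addmm with just the first row broadcasts to the identical result. Assumes torch is installed:

```python
import torch

mat1 = torch.randn(4, 3)
mat2 = torch.randn(3, 5)
bias = torch.randn(5).expand(4, 5)     # stride(0) == 0: rows are aliases

full = torch.addmm(bias, mat1, mat2)      # 2-D bias path
row = torch.addmm(bias[0], mat1, mat2)    # 1-D bias path (faster cublasLt kernel)
assert torch.allclose(full, row)
```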
  {
    "library": "pytorch",
    "name": "_generate_temporary_array_pointer",
    "source_code": "@staticmethod\ndef _generate_temporary_array_pointer(c_type: str, elements: Sequence[str], *, force_mutable: bool=False) -> str:\n    ptr_call = 'data()' if force_mutable or c_type.endswith('*') else 'cbegin()'\n    return f'std::array<{c_type}, {len(elements)}>{{{', '.join(elements)}}}.{ptr_call}'",
    "docstring": "Get a pointer to an array that only exists for the duration of the C++ statement it's used in.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_cpu.py",
    "ast_data": "FunctionDef name:_generate_temporary_array_pointer arg:c_type arg:elements arguments arg arg arg Assign BoolOp Call Return return:yes Call Call"
  },
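A minimal re-creation of the string-building trick (not the PyTorch class itself): emit a C++ expression whose std::array only lives for the duration of one statement. The helper name is illustrative:

```python
def temp_array_ptr(c_type: str, elements, *, force_mutable: bool = False) -> str:
    # Mutable pointer for pointer element types, read-only iterator otherwise.
    ptr_call = "data()" if force_mutable or c_type.endswith("*") else "cbegin()"
    joined = ", ".join(elements)
    return f"std::array<{c_type}, {len(elements)}>{{{joined}}}.{ptr_call}"

print(temp_array_ptr("int64_t", ["1", "2", "3"]))
# std::array<int64_t, 3>{1, 2, 3}.cbegin()
print(temp_array_ptr("void*", ["a", "b"]))
# std::array<void*, 2>{a, b}.data()
```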
  {
    "library": "pytorch",
    "name": "notify_join_context",
    "source_code": "@staticmethod\ndef notify_join_context(joinable: Joinable):\n    assert hasattr(joinable, '_join_config'), f'Check that the {type(joinable)} constructor calls the ``Joinable`` constructor'\n    join_config = joinable._join_config\n    if not join_config.is_first_joinable or not join_config.enable:\n        return None\n    device = joinable.join_device\n    process_group = joinable.join_process_group\n    ones = torch.ones(1, device=device)\n    work = dist.all_reduce(ones, group=process_group, async_op=True)\n    if join_config.throw_on_early_termination:\n        zeros = torch.zeros(1, device=device)\n        dist.all_reduce(zeros, group=process_group)\n        should_throw = zeros.item()\n        if should_throw:\n            raise RuntimeError('Detected at least one rank that exhausted inputs. Throwing across all ranks.')\n    return work",
    "docstring": "Notifies the join context manager that the calling process has not yet joined. Then, if `JoinableDistributedDataParallelJoinableJoinable` otherwise.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:notify_join_context arg:joinable arguments arg Call Call Assign If BoolOp Return return:no Assign Assign Assign Call Assign Call If Assign Call Call Assign Call If Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, x: Tensor) -> Tensor:\n    if x.size(2) > self.pe.size(2) or x.size(3) > self.pe.size(3):\n        max_shape = (max(x.size(2), self.pe.size(2)), max(x.size(3), self.pe.size(3)))\n        self.update_position_encoding_size(max_shape)\n    return x + self.pe[:, :, :x.size(2), :x.size(3)]",
    "docstring": "Run forward. Args: x: [N, C, H, W]",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\position_encoding.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x arguments arg arg If BoolOp Compare Call Call Compare Call Call Assign Call Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_replace_nans",
    "source_code": "def _replace_nans(self, data: DataFrame) -> DataFrame:\n    for c in data:\n        dtype = data[c].dtype\n        if dtype in (np.float32, np.float64):\n            if dtype == np.float32:\n                replacement = self.MISSING_VALUES['f']\n            else:\n                replacement = self.MISSING_VALUES['d']\n            data[c] = data[c].fillna(replacement)\n    return data",
    "docstring": "Checks floating point data columns for nans, and replaces these with the generic Stata for missing value (.)",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_replace_nans arg:self arg:data arguments arg arg For Assign If Compare If Compare Assign Assign Assign Call Return return:yes"
  },
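A standalone sketch of the same sentinel-replacement pattern: swap NaNs in float columns for a dtype-specific missing-value code. The sentinel values here are illustrative placeholders, not Stata's actual codes:

```python
import numpy as np
import pandas as pd

SENTINELS = {np.dtype("float32"): np.float32(1e30),
             np.dtype("float64"): np.float64(1e300)}

def replace_nans(df: pd.DataFrame) -> pd.DataFrame:
    for col in df:
        repl = SENTINELS.get(df[col].dtype)
        if repl is not None:                 # only touch float columns
            df[col] = df[col].fillna(repl)
    return df

df = pd.DataFrame({"a": [1.0, np.nan],
                   "b": np.array([np.nan, 2.0], dtype=np.float32)})
print(replace_nans(df))
```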
  {
    "library": "pytorch",
    "name": "_maybe_recursive_remove_dequantize",
    "source_code": "def _maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph) -> None:\n    if isinstance(arg, Node) and arg.op == 'call_method' and (arg.target == 'dequantize'):\n        quantize_node = arg.args[0]\n        node.replace_input_with(arg, quantize_node)\n    elif isinstance(arg, (list, tuple)):\n        for arg_element in arg:\n            _maybe_recursive_remove_dequantize(arg_element, node, graph)\n    elif isinstance(arg, dict):\n        for arg_element in arg.values():\n            _maybe_recursive_remove_dequantize(arg_element, node, graph)\n    else:\n        warnings.warn(f'Unsupported node type in recursive remove dequantize: {type(arg)}')",
    "docstring": "If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node, we'll recursively remove the dequantize Node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\convert.py",
    "ast_data": "FunctionDef name:_maybe_recursive_remove_dequantize arg:arg arg:node arg:graph arguments arg arg arg If BoolOp Call Compare Compare Assign Call If Call For Call If Call For Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_artist_props",
    "source_code": "def _set_artist_props(self, a):\n    a.set_figure(self.get_figure(root=False))\n    if not a.is_transform_set():\n        a.set_transform(self.transData)\n    a.axes = self\n    if a.get_mouseover():\n        self._mouseover_set.add(a)",
    "docstring": "Set the boilerplate props for artists added to Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_set_artist_props arg:self arg:a arguments arg arg Call Call If Call Call Assign If Call Call"
  },
  {
    "library": "scrapy",
    "name": "_check_received_data",
    "source_code": "def _check_received_data(self, data: bytes) -> None:\n    if data.startswith(b'HTTP/2.0 405 Method Not Allowed'):\n        raise MethodNotAllowed405(self.metadata['ip_address'])",
    "docstring": "Checks for edge cases where the connection to remote fails without raising an appropriate H2Error Arguments: data -- Data received from the remote",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:_check_received_data arg:self arg:data arguments arg arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_SparseSegmentSumGrad",
    "source_code": "@ops.RegisterGradient('SparseSegmentSum')\ndef _SparseSegmentSumGrad(op: ops.Operation, grad):\n    if _GetOpAttrOrNone(op, 'sparse_gradient'):\n        return (_SparseSegmentReduceGradV2(op, grad), None, None)\n    dim0 = array_ops.shape(op.inputs[0])[0]\n    if compat.forward_compatible(2021, 6, 10):\n        return (math_ops.sparse_segment_sum_grad(grad, op.inputs[1], op.inputs[2], dim0), None, None)\n    else:\n        return (math_ops.unsorted_segment_sum(array_ops.gather(grad, op.inputs[2]), op.inputs[1], dim0), None, None)",
    "docstring": "Gradient for SparseSegmentSum.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SparseSegmentSumGrad arg:op arg:grad arguments arg arg If Call Return return:yes Call Assign Call If Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_csr_row_scale",
    "source_code": "def inplace_csr_row_scale(X, scale):\n    assert scale.shape[0] == X.shape[0]\n    X.data *= np.repeat(scale, np.diff(X.indptr))",
    "docstring": "Inplace row scaling of a CSR matrix. Scale each sample of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix to be scaled. It should be of CSR format. scale : ndarray of float of shape (n_samples,) Array of precomputed sample-wise values to use for scaling.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:inplace_csr_row_scale arg:X arg:scale arguments arg arg Compare Call Call"
  },
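Usage sketch for the row-scaling helper above, which is public in sklearn.utils.sparsefuncs; it mutates the CSR data buffer in place, so we check against the dense result:

```python
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.sparsefuncs import inplace_csr_row_scale

X = csr_matrix(np.array([[1.0, 0.0, 2.0],
                         [0.0, 3.0, 0.0]]))
scale = np.array([10.0, 0.5])
expected = X.toarray() * scale[:, None]     # row-wise dense reference

inplace_csr_row_scale(X, scale)             # modifies X.data in place
assert np.allclose(X.toarray(), expected)
```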
  {
    "library": "matplotlib",
    "name": "index",
    "source_code": "@property\ndef index(self):\n    return self.font._index_dvi_to_freetype(self.glyph)",
    "docstring": "The FreeType index of this glyph (that can be passed to FT_Load_Glyph).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, dim, size=1, random_state=None):\n    random_state = self._get_random_state(random_state)\n    size = int(size)\n    dim = self._process_parameters(dim)\n    size = (size,) if size > 1 else ()\n    z = random_state.normal(size=size + (dim, dim))\n    q, r = np.linalg.qr(z)\n    d = r.diagonal(offset=0, axis1=-2, axis2=-1)\n    q *= (d / abs(d))[..., np.newaxis, :]\n    return q",
    "docstring": "Draw random samples from O(N). Parameters ---------- dim : integer Dimension of rotation space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Compare Assign Call Assign Call Assign Call Call Return return:yes"
  },
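A self-contained NumPy version of the QR-based sampler shown above (the classic sign-corrected construction for random orthogonal matrices), verifying orthogonality of the result; the function name is illustrative:

```python
import numpy as np

def random_orthogonal(dim, rng=None):
    rng = np.random.default_rng(rng)
    z = rng.normal(size=(dim, dim))
    q, r = np.linalg.qr(z)
    d = np.diagonal(r)
    return q * (d / np.abs(d))            # fix column signs for uniformity

q = random_orthogonal(4, rng=0)
assert np.allclose(q @ q.T, np.eye(4))    # member of O(N): Q Q^T = I
```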
  {
    "library": "tensorflow",
    "name": "_create_forward_backward_with_graph",
    "source_code": "def _create_forward_backward_with_graph(attrs, forward_graph, backwards_graph: func_graph_module.FuncGraph):\n    forward_function_name = _forward_name(forward_graph.name)\n    common_attributes = dict(attrs)\n    common_attributes.pop(attributes_lib.IMPLEMENTS, None)\n    backward_function_attr = attributes_lib.parse_func_attrs({attributes_lib.FORWARD_FUNCTION: forward_function_name})\n    backward_function_attr.update(common_attributes)\n    function_type = function_type_lib.from_structured_signature(((), {}), backwards_graph.structured_outputs, backwards_graph.function_captures.capture_types)\n    backward_function = ConcreteFunction.from_func_graph(backwards_graph, function_type, attrs=backward_function_attr)\n    forward_function_attr = attributes_lib.parse_func_attrs({attributes_lib.BACKWARD_FUNCTION: backward_function.name})\n    forward_function_attr.update(common_attributes)\n    forward_function = atomic_function.from_func_graph(forward_function_name, forward_graph, forward_function_attr)\n    return (forward_function, backward_function)",
    "docstring": "Creates forward and backward functions from the function graphs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:_create_forward_backward_with_graph arg:attrs arg:forward_graph arg:backwards_graph arguments arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_available",
    "source_code": "def is_available():\n    return torch._nnpack_available()",
    "docstring": "Return whether PyTorch is built with NNPACK support.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\nnpack\\__init__.py",
    "ast_data": "FunctionDef name:is_available arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "complex_double",
    "source_code": "def complex_double(self):\n    return self._to(torch.cdouble)",
    "docstring": "Casts this storage to complex double type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:complex_double arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "put_back",
    "source_code": "def put_back(self, closure):\n    assert closure.tag is None\n    with self._queue_lock:\n        if self._inflight_closure_count < 1:\n            raise AssertionError('There is no inflight closures to put_back.')\n        if self._error:\n            closure.mark_cancelled()\n        else:\n            self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())\n            self._queue.put(closure, block=False)\n            metric_utils.monitor_int('queued_closures', self._queue.qsize())\n            self._closures_queued_condition.notify()\n        self.inflight_closure_count -= 1\n        if self._inflight_closure_count == 0:\n            self._no_inflight_closure_condition.notify_all()",
    "docstring": "Put the closure back into the queue as it was not properly executed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:put_back arg:self arg:closure arguments arg arg Compare With If Compare Raise Call If Call Call arguments Call Call Call Call Call If Compare Call"
  },
  {
    "library": "scipy",
    "name": "splder",
    "source_code": "def splder(tck, n=1):\n    if isinstance(tck, BSpline):\n        return tck.derivative(n)\n    else:\n        return _impl.splder(tck, n)",
    "docstring": "Compute the spline representation of the derivative of a given spline .. legacy:: function Specifically, we recommend constructing a object and using its `BSplinetcksproot\\pi/2 + n\\pi\\cos(x) = \\sin'(x)splevsplderspaldespalde` examples section.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py",
    "ast_data": "FunctionDef name:splder arg:tck arg:n arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
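Usage sketch: fit a spline with scipy.interpolate.splrep, differentiate it with splder, and evaluate the derivative with splev; the derivative of sin should track cos closely:

```python
import numpy as np
from scipy.interpolate import splrep, splder, splev

x = np.linspace(0, 2 * np.pi, 200)
tck = splrep(x, np.sin(x))        # interpolating cubic spline through sin(x)
dtck = splder(tck)                # tck representation of the derivative

xs = np.linspace(0.5, 5.5, 7)
assert np.allclose(splev(xs, dtck), np.cos(xs), atol=1e-4)
```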
  {
    "library": "scipy",
    "name": "zeros",
    "source_code": "@property\ndef zeros(self):\n    return self._zeros",
    "docstring": "Zeros of the system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:zeros arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "print_tensor",
    "source_code": "def print_tensor(path: OBJ_PATH, value: STATE_DICT_ITEM, print_fun: Callable[[str], None]=print) -> None:\n    _print_nested(value, prefix=str(path), print_fun=print_fun)",
    "docstring": "Use this callback with traverse_state_dict to print its content. By default the content is printed using the builtin ` callable.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_traverse.py",
    "ast_data": "FunctionDef name:print_tensor arg:path arg:value arg:print_fun arguments arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "Problem05",
    "source_code": "class Problem05(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(0.0, 1.2)]\n        self.global_optimum = 0.96609\n        self.fglob = -1.48907\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -(1.4 - 3 * x) * sin(18.0 * x)",
    "docstring": "Univariate Problem05 objective function. This class defines the Univariate Problem05 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem05}}(x) = - \\left(1.4 - 3x \\right) \\sin(18x) Bound constraints: :math: .. figure:: figures/Problem05.png :alt: Univariate Problem05 function :align: center **Univariate Problem05 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem05 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call"
  },
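A quick numerical check of the optimum reported above, using only the formula from the source code:

```python
from math import sin

def f(x):
    # f(x) = -(1.4 - 3x) * sin(18x), the Problem05 objective
    return -(1.4 - 3 * x) * sin(18.0 * x)

print(f(0.96609))   # ~ -1.489, matching fglob = -1.48907
```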
  {
    "library": "tensorflow",
    "name": "_export_model_json",
    "source_code": "def _export_model_json(model, saved_model_path):\n    model_json = model.to_json()\n    model_json_filepath = os.path.join(_get_or_create_assets_dir(saved_model_path), compat.as_text(SAVED_MODEL_FILENAME_JSON))\n    with gfile.Open(model_json_filepath, 'w') as f:\n        f.write(model_json)",
    "docstring": "Saves model configuration as a json string under assets folder.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model_experimental.py",
    "ast_data": "FunctionDef name:_export_model_json arg:model arg:saved_model_path arguments arg arg Assign Call Assign Call Call Call With Call Call"
  },
  {
    "library": "scipy",
    "name": "__repr__",
    "source_code": "def __repr__(self):\n    return f'{self.__class__.__name__}(\\n{repr(self.num)},\\n{repr(self.den)},\\ndt: {repr(self.dt)}\\n)'",
    "docstring": "Return representation of the system's transfer function",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "urlize",
    "source_code": "@register.filter(is_safe=True, needs_autoescape=True)\n@stringfilter\ndef urlize(value, autoescape=True):\n    return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))",
    "docstring": "Convert URLs in plain text into clickable links.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:urlize arg:value arg:autoescape arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_adjust_max_rows",
    "source_code": "def _adjust_max_rows(self, max_rows: int | None) -> int | None:\n    if max_rows:\n        if len(self.frame) > max_rows and self.min_rows:\n            max_rows = min(self.min_rows, max_rows)\n    return max_rows",
    "docstring": "Adjust max_rows using display logic. See description here: GH #37359",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_adjust_max_rows arg:self arg:max_rows arguments arg arg If If BoolOp Compare Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "encrypt",
    "source_code": "def encrypt(self, msg, aad, iv, key):\n    self.check_iv(iv)\n    chacha = Cryptodome_ChaCha20_Poly1305.new(key=key, nonce=iv)\n    chacha.update(aad)\n    ciphertext, tag = chacha.encrypt_and_digest(msg)\n    return (ciphertext, tag)",
    "docstring": "Content Encryption with AEAD_XCHACHA20_POLY1305. :param msg: text to be encrypt in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param key: encrypted key in bytes :return: (ciphertext, tag)",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\drafts\\_jwe_enc_cryptodome.py",
    "ast_data": "FunctionDef name:encrypt arg:self arg:msg arg:aad arg:iv arg:key arguments arg arg arg arg arg Call Assign Call Call Assign Call Return return:yes"
  },
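Usage sketch of the underlying PyCryptodome primitive used above: a 24-byte nonce selects the XChaCha20 variant, and encrypt_and_digest returns the (ciphertext, tag) pair. Assumes the pycryptodomex package is installed:

```python
from Cryptodome.Cipher import ChaCha20_Poly1305
from Cryptodome.Random import get_random_bytes

key = get_random_bytes(32)
nonce = get_random_bytes(24)             # 24 bytes -> XChaCha20-Poly1305

cipher = ChaCha20_Poly1305.new(key=key, nonce=nonce)
cipher.update(b"associated data")        # AAD is authenticated, not encrypted
ciphertext, tag = cipher.encrypt_and_digest(b"secret message")

# Decryption verifies both the tag and the AAD.
decipher = ChaCha20_Poly1305.new(key=key, nonce=nonce)
decipher.update(b"associated data")
assert decipher.decrypt_and_verify(ciphertext, tag) == b"secret message"
```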
  {
    "library": "scikit-learn",
    "name": "plot",
    "source_code": "def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):\n    self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)\n    info_pos_label = f'(Positive class: {self.pos_label})' if self.pos_label is not None else ''\n    default_line_kwargs = {'marker': 's', 'linestyle': '-'}\n    if name is not None:\n        default_line_kwargs['label'] = name\n    line_kwargs = _validate_style_kwargs(default_line_kwargs, kwargs)\n    ref_line_label = 'Perfectly calibrated'\n    existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]\n    if ref_line and (not existing_ref_line):\n        self.ax_.plot([0, 1], [0, 1], 'k:', label=ref_line_label)\n    self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]\n    self.ax_.legend(loc='lower right')\n    xlabel = f'Mean predicted probability {info_pos_label}'\n    ylabel = f'Fraction of positives {info_pos_label}'\n    self.ax_.set(xlabel=xlabel, ylabel=ylabel)\n    return self",
    "docstring": "Plot visualization. Extra keyword arguments will be passed to :func:. Parameters ---------- ax : Matplotlib Axes, default=None Axes object to plot on. If , a new figure and axes is created. name : str, default=None Name for labeling curve. If , use if not , otherwise no labeling is shown. ref_line : bool, default=True If , plots a reference line representing a perfectly calibrated classifier. **kwargs : dict Keyword arguments to be passed to :func:. Returns ------- display : :class: Object that stores computed values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:plot arg:self arguments arg arg arg arg arg Assign Call Assign Compare Assign If Compare Assign Assign Call Assign Assign Compare Call If BoolOp Call Assign Call Call Assign Assign Call Return return:yes"
  },
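A usage sketch reaching the plot method above through the public constructor CalibrationDisplay.from_predictions (requires matplotlib; the synthetic data is calibrated by construction):

```python
import numpy as np
from sklearn.calibration import CalibrationDisplay

rng = np.random.default_rng(0)
y_prob = rng.uniform(size=200)
y_true = (rng.uniform(size=200) < y_prob).astype(int)  # P(y=1|p) = p

disp = CalibrationDisplay.from_predictions(y_true, y_prob, n_bins=5, name="demo")
disp.plot(ref_line=True)   # re-draws the curve and reference line; returns disp
```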
  {
    "library": "tensorflow",
    "name": "get_definition_directive",
    "source_code": "def get_definition_directive(self, node, directive, arg, default):\n    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())\n    if not defs:\n        return default\n    arg_values_found = []\n    for def_ in defs:\n        if directive in def_.directives and arg in def_.directives[directive]:\n            arg_values_found.append(def_.directives[directive][arg])\n    if not arg_values_found:\n        return default\n    if len(arg_values_found) == 1:\n        return arg_values_found[0]\n    first_value = arg_values_found[0]\n    for other_value in arg_values_found[1:]:\n        if not ast_util.matches(first_value, other_value):\n            qn = anno.getanno(node, anno.Basic.QN)\n            raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' % (qn, directive.__name__, arg, parser.unparse(other_value).strip(), parser.unparse(first_value).strip()))\n    return first_value",
    "docstring": "Returns the unique directive argument for a symbol. See lang/directives.py for details on directives. Example: # Given a directive in the code: ag.foo_directive(bar, baz=1) # One can write for an AST node Name(id='bar'): get_definition_directive(node, ag.foo_directive, 'baz') Args: node: ast.AST, the node representing the symbol for which the directive argument is needed. directive: Callable[..., Any], the directive to search. arg: str, the directive argument to return. default: Any Raises: ValueError: if conflicting annotations have been found",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py",
    "ast_data": "FunctionDef name:get_definition_directive arg:self arg:node arg:directive arg:arg arg:default arguments arg arg arg arg arg Assign Call If Return return:yes Assign For If BoolOp Compare Compare Call If Return return:yes If Compare Call Return return:yes Assign For If Call Assign Call Raise Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "mean",
    "source_code": "def mean(self, n, p):\n    n, p, npcond = self._process_parameters(n, p)\n    result = n[..., np.newaxis] * p\n    return self._checkresult(result, npcond, np.nan)",
    "docstring": "Mean of the Multinomial distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mean arg:self arg:n arg:p arguments arg arg arg Assign Call Assign Return return:yes Call"
  },
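Usage sketch: the multinomial mean is simply n * p, broadcast over the category axis, and is available via the public scipy.stats.multinomial object:

```python
import numpy as np
from scipy.stats import multinomial

n, p = 10, np.array([0.2, 0.3, 0.5])
print(multinomial.mean(n, p))            # [2. 3. 5.]
assert np.allclose(multinomial.mean(n, p), n * p)
```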
  {
    "library": "scipy",
    "name": "_check",
    "source_code": "def _check(self):\n    if self.ndim != len(self.coords):\n        raise ValueError(f'mismatching number of index arrays for shape; got {len(self.coords)}, expected {self.ndim}')\n    for i, idx in enumerate(self.coords):\n        if idx.dtype.kind != 'i':\n            warn(f'index array {i} has non-integer dtype ({idx.dtype.name})', stacklevel=3)\n    idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.shape))\n    self.coords = tuple((np.asarray(idx, dtype=idx_dtype) for idx in self.coords))\n    self.data = to_native(self.data)\n    if self.nnz > 0:\n        for i, idx in enumerate(self.coords):\n            if idx.max() >= self.shape[i]:\n                raise ValueError(f'axis {i} index {idx.max()} exceeds matrix dimension {self.shape[i]}')\n            if idx.min() < 0:\n                raise ValueError(f'negative axis {i} index: {idx.min()}')",
    "docstring": "Checks data structure for consistency",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:_check arg:self arguments arg If Compare Call Raise Call Call For Call If Compare Call Assign Call Call Assign Call Call Assign Call If Compare For Call If Compare Call Raise Call Call If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_shape_as_tensor",
    "source_code": "def _shape_as_tensor(shape, dtype):\n    if dtype != dtypes.int64 and dtype != dtypes.int32:\n        raise ValueError(f'Expected int64 or int32 for dtype: got {dtype}.')\n    if isinstance(shape, tensor_lib.Tensor):\n        if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32:\n            return math_ops.cast(shape, dtype)\n        return shape\n    shape = tensor_shape.as_shape(shape)\n    if not shape:\n        return constant_op.constant(-1, dtype=dtype)\n    shape = [-1 if x is None else x for x in shape.as_list()]\n    return constant_op.constant(shape, dtype=dtype)",
    "docstring": "Takes shape and coerces it to a shape as a tensor. If the object is already a tensor, simply passes it on (result is guaranteed to be int64 or int32, but not necessarily dtype). If not, creates a tensor of type dtype. Result is either a scalar equal to -1 if the shape is unknown_rank. Otherwise, it is a vector, where unknown dimensions are represented with a value of -1. In C++, see TensorShapeFromTensor for parsing shapes in kernels, and InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for use in the shape inference function. Args: shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]], Tuple[Optional[Int]]. dtype: tf.int64 or tf.int32 Returns: a scalar or vector tensor of dtype tf.int32 or tf.int64.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_shape_as_tensor arg:shape arg:dtype arguments arg arg If BoolOp Compare Compare Raise Call If Call If BoolOp Compare Compare Return return:yes Call Return return:yes Assign Call If Return return:yes Call Assign Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_rotate_dims",
    "source_code": "def _maybe_rotate_dims(self, x, rotate_right=False):\n    needs_rotation_const = tensor_util.constant_value(self._needs_rotation)\n    if needs_rotation_const is not None and (not needs_rotation_const):\n        return x\n    ndims = array_ops.rank(x)\n    n = ndims - self._rotate_ndims if rotate_right else self._rotate_ndims\n    return array_ops.transpose(x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))",
    "docstring": "Helper which rolls left event_dims left or right event_dims right.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_maybe_rotate_dims arg:self arg:x arg:rotate_right arguments arg arg arg Assign Call If BoolOp Compare Return return:yes Assign Call Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "generate_keras_metadata",
    "source_code": "def generate_keras_metadata(saved_nodes, node_paths):\n    metadata = saved_metadata_pb2.SavedMetadata()\n    for node_id, node in enumerate(saved_nodes):\n        if isinstance(node, base_layer.Layer):\n            path = node_paths[node]\n            if not path:\n                node_path = 'root'\n            else:\n                node_path = 'root.{}'.format('.'.join([ref.name for ref in path]))\n            metadata.nodes.add(node_id=node_id, node_path=node_path, version=versions_pb2.VersionDef(producer=1, min_consumer=1, bad_consumers=[]), identifier=node._object_identifier, metadata=node._tracking_metadata)\n    return metadata",
    "docstring": "Constructs a KerasMetadata proto with the metadata of each keras object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save.py",
    "ast_data": "FunctionDef name:generate_keras_metadata arg:saved_nodes arg:node_paths arguments arg arg Assign Call For Call If Call Assign If Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tag_sharding_attribute_for_dequeued_tensor",
    "source_code": "def _tag_sharding_attribute_for_dequeued_tensor(tensor, dims):\n    if dims is None:\n        return xla_sharding.replicate(tensor, assign_tuple_sharding=True)\n    elif np.prod(dims) == 1:\n        return xla_sharding.assign_device(tensor, 0, assign_tuple_sharding=True)\n    else:\n        tile_assignment = np.arange(np.prod(dims)).reshape(dims)\n        return xla_sharding.tile(tensor=tensor, tile_assignment=tile_assignment, assign_tuple_sharding=True)",
    "docstring": "Tags appropriate XLA sharding attribute to the dequeued tensor. The sharding attribute of the dequeued tensor will be a tuple. Args: tensor: The dequeued tensor on TPU. dims: A list of integer describes how the tensor is partitioned. Returns: The same tensor with the xla_sharding attribute.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:_tag_sharding_attribute_for_dequeued_tensor arg:tensor arg:dims arguments arg arg If Compare Return return:yes Call If Compare Call Return return:yes Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_bytes_from_wsgi",
    "source_code": "def get_bytes_from_wsgi(environ, key, default):\n    value = environ.get(key, default)\n    return value.encode('iso-8859-1')",
    "docstring": "Get a value from the WSGI environ dictionary as bytes. key and default should be strings.",
    "type": "function",
    "file_path": "django\\django\\core\\handlers\\wsgi.py",
    "ast_data": "FunctionDef name:get_bytes_from_wsgi arg:environ arg:key arg:default arguments arg arg arg Assign Call Return return:yes Call"
  },
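A minimal demonstration of the WSGI convention this helper relies on: environ values are native strings decoded as ISO-8859-1, so re-encoding with that codec recovers the original bytes losslessly:

```python
def get_bytes_from_wsgi(environ, key, default):
    # Same logic as the Django helper above, shown standalone.
    return environ.get(key, default).encode("iso-8859-1")

environ = {"PATH_INFO": "/caf\xe9"}   # a latin-1 byte smuggled through a str
assert get_bytes_from_wsgi(environ, "PATH_INFO", "") == b"/caf\xe9"
```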
  {
    "library": "pytorch",
    "name": "AdaptiveAvgPool1d",
    "source_code": "class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):\n    output_size: _size_1_t\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.adaptive_avg_pool1d(input, self.output_size)",
    "docstring": "Applies a 1D adaptive average pooling over an input signal composed of several input planes. The output size is :math:, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size :math:. Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math:. Examples: >>> # target output size of 5 >>> m = nn.AdaptiveAvgPool1d(5) >>> input = torch.randn(1, 64, 8) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\pooling.py",
    "ast_data": "ClassDef name:AdaptiveAvgPool1d FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "named_buffers",
    "source_code": "def named_buffers(self, remove_duplicate: bool=True) -> Iterable[tuple[str, torch.Tensor]]:\n    yield from self.module.named_buffers(remove_duplicate=remove_duplicate)",
    "docstring": "Iterate over all the buffers in the module.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:named_buffers arg:self arg:remove_duplicate arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "evaluate_tensor_slice",
    "source_code": "def evaluate_tensor_slice(tensor, tensor_slicing):\n    _ = tensor\n    if not validate_slicing_string(tensor_slicing):\n        raise ValueError('Invalid tensor-slicing string.')\n    return tensor[_parse_slices(tensor_slicing)]",
    "docstring": "Call eval on the slicing of a tensor, with validation. Args: tensor: (numpy ndarray) The tensor value. tensor_slicing: (str or None) Slicing of the tensor, e.g., \"[:, 1]\". If None, no slicing will be performed on the tensor. Returns: (numpy ndarray) The sliced tensor. Raises: ValueError: If tensor_slicing is not a valid numpy ndarray slicing str.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:evaluate_tensor_slice arg:tensor arg:tensor_slicing arguments arg arg Assign If Call Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_dict_to_tensor",
    "source_code": "def _dict_to_tensor(self, x, k):\n    return array_ops_stack.stack([x[i] for i in range(k)])",
    "docstring": "Convert a dictionary to a tensor. Args: x: A dictionary of length k. k: Dimension of x. Returns: A tensor with the same dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:_dict_to_tensor arg:self arg:x arg:k arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "fftshift",
    "source_code": "@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')\ndef fftshift(x, axes=None):\n    x = asarray(x)\n    if axes is None:\n        axes = tuple(range(x.ndim))\n        shift = [dim // 2 for dim in x.shape]\n    elif isinstance(axes, integer_types):\n        shift = x.shape[axes] // 2\n    else:\n        shift = [x.shape[ax] // 2 for ax in axes]\n    return roll(x, shift, axes)",
    "docstring": "Shift the zero-frequency component to the center of the spectrum. This function swaps half-spaces for all axes listed (defaults to all). Note that `fftshift`. Examples -------- >>> import numpy as np >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs array([ 0., 1., 2., ..., -3., -2., -1.]) >>> np.fft.fftshift(freqs) array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) Shift the zero-frequency component only along the second axis: >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], [ 3., 4., -4.], [-3., -2., -1.]]) >>> np.fft.fftshift(freqs, axes=(1,)) array([[ 2., 0., 1.], [-4., 3., 4.], [-1., -3., -2.]])",
    "type": "function",
    "file_path": "numpy\\numpy\\fft\\_helper.py",
    "ast_data": "FunctionDef name:fftshift arg:x arg:axes arguments arg arg Assign Call If Compare Assign Call Call Assign If Call Assign Assign Return return:yes Call Call"
  },
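Usage sketch: fftshift centers the zero-frequency bin, and np.fft.ifftshift undoes it exactly, including for odd-length inputs:

```python
import numpy as np

freqs = np.fft.fftfreq(10, 0.1)
print(np.fft.fftshift(freqs))      # [-5. -4. -3. -2. -1.  0.  1.  2.  3.  4.]
assert np.array_equal(np.fft.ifftshift(np.fft.fftshift(freqs)), freqs)
```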
  {
    "library": "cherrypy",
    "name": "state",
    "source_code": "@property\ndef state(self):\n    return self._state",
    "docstring": "The bus state.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\win32.py",
    "ast_data": "FunctionDef name:state arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_imports_for_symbol",
    "source_code": "def add_imports_for_symbol(module_code_builder, symbol, source_module_name, source_name, api_name, api_version, output_module_prefix=''):\n    if api_version == 1:\n        names_attr = API_ATTRS_V1[api_name].names\n        constants_attr = API_ATTRS_V1[api_name].constants\n    else:\n        names_attr = API_ATTRS[api_name].names\n        constants_attr = API_ATTRS[api_name].constants\n    if source_name == constants_attr:\n        for exports, name in symbol:\n            for export in exports:\n                dest_module, dest_name = _get_name_and_module(export)\n                dest_module = _join_modules(output_module_prefix, dest_module)\n                module_code_builder.add_import(None, source_module_name, name, dest_module, dest_name)\n    if hasattr(symbol, '__dict__') and names_attr in symbol.__dict__:\n        for export in getattr(symbol, names_attr):\n            dest_module, dest_name = _get_name_and_module(export)\n            dest_module = _join_modules(output_module_prefix, dest_module)\n            module_code_builder.add_import(symbol, source_module_name, source_name, dest_module, dest_name)",
    "docstring": "Add imports for the given symbol to . Args: module_code_builder: instance. symbol: A symbol. source_module_name: Module that we can import the symbol from. source_name: Name we can import the symbol with. api_name: API name. Currently, must be . api_version: API version. output_module_prefix: Prefix to prepend to destination module.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator\\create_python_api.py",
    "ast_data": "FunctionDef name:add_imports_for_symbol arg:module_code_builder arg:symbol arg:source_module_name arg:source_name arg:api_name arg:api_version arg:output_module_prefix arguments arg arg arg arg arg arg arg If Compare Assign Assign Assign Assign If Compare For For Assign Call Assign Call Call If BoolOp Call Compare For Call Assign Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "import_epsg",
    "source_code": "def import_epsg(self, epsg):\n    capi.from_epsg(self.ptr, epsg)",
    "docstring": "Import the Spatial Reference from the EPSG code (an integer).",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:import_epsg arg:self arg:epsg arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "f",
    "source_code": "def f(x):\n    cast_types = (tensor.Tensor, sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor)\n    if isinstance(x, cast_types) and x.dtype.is_floating and (x.dtype.base_dtype.name != compute_dtype):\n        return math_ops.cast(x, compute_dtype)\n    elif isinstance(x, tensor.TensorSpec) and x.dtype.is_floating:\n        return tensor.TensorSpec(x.shape, compute_dtype, x.name)\n    else:\n        return x",
    "docstring": "Cast a single Tensor or TensorSpec to the compute dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:f arg:x arguments arg Assign If BoolOp Call Compare Return return:yes Call If BoolOp Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, parameters: list[Union[FuzzedParameter, list[FuzzedParameter]]], tensors: list[Union[FuzzedTensor, list[FuzzedTensor]]], constraints: Optional[list[Callable]]=None, seed: Optional[int]=None):\n    import numpy as np\n    if seed is None:\n        seed = int(np.random.RandomState().randint(0, 2 ** 32 - 1, dtype=np.int64))\n    self._seed = seed\n    self._parameters = Fuzzer._unpack(parameters, FuzzedParameter)\n    self._tensors = Fuzzer._unpack(tensors, FuzzedTensor)\n    self._constraints = constraints or ()\n    p_names = {p.name for p in self._parameters}\n    t_names = {t.name for t in self._tensors}\n    name_overlap = p_names.intersection(t_names)\n    if name_overlap:\n        raise ValueError(f'Duplicate names in parameters and tensors: {name_overlap}')\n    self._rejections = 0\n    self._total_generated = 0",
    "docstring": "Args: parameters: List of FuzzedParameters which provide specifications for generated parameters. Iterable elements will be unpacked, though arbitrary nested structures will not. tensors: List of FuzzedTensors which define the Tensors which will be created each step based on the parameters for that step. Iterable elements will be unpacked, though arbitrary nested structures will not. constraints: List of callables. They will be called with params as kwargs, and if any of them return False the current set of parameters will be rejected. seed: Seed for the RandomState used by the Fuzzer. This will also be used to set the PyTorch random seed so that random ops will create reproducible Tensors.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\fuzzer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:parameters arg:tensors arg:constraints arg:seed arguments arg arg arg arg arg If Compare Assign Call Call Call Assign Assign Call Assign Call Assign BoolOp Assign Assign Assign Call If Raise Call Assign Assign"
  },
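A brief usage sketch for the constructor above, assuming the public `torch.utils.benchmark` fuzzing API (`Fuzzer`, `FuzzedParameter`, `FuzzedTensor`, and `Fuzzer.take`); the parameter and tensor names are illustrative:

```python
from torch.utils.benchmark import Fuzzer, FuzzedParameter, FuzzedTensor

# Draw random square float tensors whose side length is itself fuzzed.
fuzzer = Fuzzer(
    parameters=[
        FuzzedParameter("n", minval=16, maxval=1024, distribution="loguniform"),
    ],
    tensors=[
        FuzzedTensor("x", size=("n", "n"), probability_contiguous=0.75),
    ],
    seed=0,  # fixed seed -> reproducible draws
)

for tensors, tensor_params, params in fuzzer.take(3):
    x = tensors["x"]
    print(tuple(x.shape), params["n"])
```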
  {
    "library": "tensorflow",
    "name": "add_n",
    "source_code": "@tf_export('math.add_n', 'add_n')\n@dispatch.add_dispatch_support(iterable_parameters=['inputs'])\ndef add_n(inputs, name=None):\n    if not inputs or not isinstance(inputs, collections_abc.Iterable):\n        raise ValueError('Inputs must be an iterable of at least one Tensor/IndexedSlices with the same dtype and shape.')\n    inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs)\n    if not all((isinstance(x, (tensor_lib.Tensor, indexed_slices.IndexedSlices)) for x in inputs)):\n        raise ValueError('Inputs must be an iterable of at least one Tensor/IndexedSlices with the same dtype and shape.')\n    if len(inputs) == 1:\n        if isinstance(inputs[0], indexed_slices.IndexedSlices):\n            values = ops.convert_to_tensor(inputs[0])\n        else:\n            values = inputs[0]\n        if name:\n            return array_ops.identity(values, name=name)\n        return values\n    return gen_math_ops.add_n(inputs, name=name)",
    "docstring": "Returns the element-wise sum of a list of tensors. All inputs in the list must have the same shape. This op does not [broadcast]( its inputs. If you need broadcasting, use (or the operator) instead. For example: >>> a = tf.constant([[3, 5], [4, 8]]) >>> b = tf.constant([[1, 6], [2, 9]]) >>> tf.math.add_n([a, b, a]).numpy() array([[ 7, 16], [10, 25]], dtype=int32) See Also: * - This performs the same mathematical operation, but may be more efficient because it sums the tensors directly. on the other hand calls on the list of tensors, unnecessarily stacking them into a single tensor before summing. Args: inputs: A list of or objects, each with the same shape and type. objects will be converted into dense tensors prior to adding. name: A name for the operation (optional). Returns: A of the same shape and type as the elements of . Raises: ValueError: If don't all have same shape and dtype or the shape cannot be inferred.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:add_n arg:inputs arg:name arguments arg arg If BoolOp Call Raise Call Assign Call If Call Call Raise Call If Compare Call If Call Assign Call Assign If Return return:yes Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ShardDataset",
    "source_code": "class _ShardDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, num_shards, index, name):\n        self._input_dataset = input_dataset\n        self._num_shards = ops.convert_to_tensor(num_shards, dtype=dtypes.int64, name='num_shards')\n        self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name='index')\n        self._name = name\n        variant_tensor = gen_dataset_ops.shard_dataset(input_dataset._variant_tensor, num_shards=self._num_shards, index=self._index, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A for sharding its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\shard_op.py",
    "ast_data": "ClassDef name:_ShardDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:num_shards arg:index arg:name arguments arg arg arg arg arg Assign Assign Call Assign Call Assign Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_hypersphere_surface_sample",
    "source_code": "def _hypersphere_surface_sample(self, center: np.ndarray, radius: DecimalNumber, candidates: IntNumber=1) -> np.ndarray:\n    vec = self.rng.standard_normal(size=(candidates, self.d))\n    vec /= np.linalg.norm(vec, axis=1)[:, None]\n    p = center + np.multiply(vec, radius)\n    return p",
    "docstring": "Uniform sampling on the hypersphere's surface.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_hypersphere_surface_sample arg:self arg:center arg:radius arg:candidates arguments arg arg arg arg Assign Call Call Assign Call Return return:yes"
  },
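The method above uses the standard trick of normalizing Gaussian draws, which are rotationally symmetric and therefore land uniformly on the sphere's surface once rescaled. A self-contained NumPy sketch of the same logic (names are illustrative):

```python
import numpy as np

rng = np.random.default_rng(42)
d, candidates = 3, 5                  # dimension, number of samples
center = np.array([1.0, 0.0, 0.0])
radius = 2.0

# Normalized Gaussian vectors are uniform on the unit sphere's surface.
vec = rng.standard_normal(size=(candidates, d))
vec /= np.linalg.norm(vec, axis=1)[:, None]
p = center + radius * vec

# Every sample sits at distance `radius` from `center`.
print(np.linalg.norm(p - center, axis=1))  # ~[2. 2. 2. 2. 2.]
```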
  {
    "library": "tensorflow",
    "name": "replace_capture_with_deferred_capture",
    "source_code": "def replace_capture_with_deferred_capture(self, tensor, closure, spec, placeholder, default_value=None):\n    self._function_captures.pop(id(tensor), is_by_ref=False)\n    self.capture_call_time_value(closure, spec, key=id(tensor), default_value=default_value, placeholder=placeholder)",
    "docstring": "Replaces existing capture with a deferred capture . Caution: It is the caller's responsibility to make sure that, after calling this function, the TypeSpec of the (i.e. internal placeholders) and the (i.e. external captures) of a concrete function that wraps this function graph are still compatible. Thus user should pairing usage of this function with to make sure the order still matches. For example, Args: tensor: Tensor already captured. closure: function which takes no arguments, to be evaluated at function call time, returning a nest of tensors compatible with . spec: nest of TypeSpec for the value to capture. placeholder: the internal placeholder corresponding to the captured . default_value: optional value to use in environments that cannot safely evaluate closure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:replace_capture_with_deferred_capture arg:self arg:tensor arg:closure arg:spec arg:placeholder arg:default_value arguments arg arg arg arg arg arg Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, step, offset):\n    if step <= 0:\n        raise ValueError(\"'step' must be positive\")\n    self.step = step\n    self._offset = abs(offset)",
    "docstring": "Parameters ---------- step : float > 0 Interval between ticks. offset : float Offset subtracted from the data limits prior to calculating tick locations.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:step arg:offset arguments arg arg arg If Compare Raise Call Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bending_matrices",
    "source_code": "def get_bending_matrices(self, J, ecc):\n    n = np.size(ecc, 0)\n    J1 = self.J0_to_J1 @ J\n    J2 = self.J0_to_J2 @ J\n    DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)\n    DOF_rot[:, 0, 0] = 1\n    DOF_rot[:, 3, 3] = 1\n    DOF_rot[:, 6, 6] = 1\n    DOF_rot[:, 1:3, 1:3] = J\n    DOF_rot[:, 4:6, 4:6] = J1\n    DOF_rot[:, 7:9, 7:9] = J2\n    H_rot, area = self.get_Hrot_from_J(J, return_area=True)\n    K = np.zeros([n, 9, 9], dtype=np.float64)\n    weights = self.gauss_w\n    pts = self.gauss_pts\n    for igauss in range(self.n_gauss):\n        alpha = np.tile(pts[igauss, :], n).reshape(n, 3)\n        alpha = np.expand_dims(alpha, 2)\n        weight = weights[igauss]\n        d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)\n        d2Skdx2 = d2Skdksi2 @ H_rot\n        K += weight * (d2Skdx2 @ self.E @ _transpose_vectorized(d2Skdx2))\n    K = _transpose_vectorized(DOF_rot) @ K @ DOF_rot\n    return _scalar_vectorized(area, K)",
    "docstring": "Parameters ---------- *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at triangle first apex) *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle eccentricities Returns ------- Returns the element K matrices for bending energy expressed in GLOBAL nodal coordinates. K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA] tri_J is needed to rotate dofs from local basis to global basis",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:get_bending_matrices arg:self arg:J arg:ecc arguments arg arg arg Assign Call Assign Assign Assign Call Assign Assign Assign Assign Assign Assign Assign Call Assign Call Assign Assign For Call Assign Call Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "list",
    "source_code": "def list():\n    pass",
    "docstring": "Return a list with the names of all spiders available in the project",
    "type": "method",
    "file_path": "scrapy\\scrapy\\interfaces.py",
    "ast_data": "FunctionDef name:list arguments"
  },
  {
    "library": "django",
    "name": "get_dated_items",
    "source_code": "def get_dated_items(self):\n    raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')",
    "docstring": "Obtain the list of dates and items.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_dated_items arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_node_target",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef get_node_target(submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> str:\n    assert node.op in CALLABLE_NODE_OPS, 'Expect op types of ' + ', '.join(CALLABLE_NODE_OPS) + f', but found {node.op}'\n    if node.op == 'call_module':\n        assert isinstance(node.target, str)\n        submod = submodules[node.target]\n        submod_type = getattr(submod, '_base_class_origin', type(submod))\n        return get_acc_ops_name(submod_type)\n    elif node.op == 'call_function':\n        target: Any = node.target\n        return f'acc_ops.{target.__name__}' if target.__module__ is not None and 'acc_ops' in target.__module__ else _get_qualified_name(target)\n    else:\n        assert isinstance(node.target, str)\n        return node.target",
    "docstring": "Given a returns its target typename. For \"call_method\" node, return node.target which is the name of that method being called. This could potential lead to conflict but should be okay because normally it's on a tensor. For \"call_function\" node, return typename of node.target. For \"call_module\" node, return typename of the module that node.target point to. If seeing \"_VariableFunctionsClass\" in the target name string, it will be replaced by \"torch\". e.g. _VariableFunctionsClass.relu would become torch.relu.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\tools_common.py",
    "ast_data": "FunctionDef name:get_node_target arg:submodules arg:node arguments arg arg Compare Call If Compare Call Assign Assign Call Call Return return:yes Call If Compare Return return:yes BoolOp Compare Compare Call Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "initialization_vector",
    "source_code": "@property\n@abc.abstractmethod\ndef initialization_vector(self) -> utils.Buffer:\n    pass",
    "docstring": "The value of the initialization vector for this mode as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\modes.py",
    "ast_data": "FunctionDef name:initialization_vector arg:self arguments arg"
  },
  {
    "library": "seaborn",
    "name": "map_offdiag",
    "source_code": "def map_offdiag(self, func, **kwargs):\n    if self.square_grid:\n        self.map_lower(func, **kwargs)\n        if not self._corner:\n            self.map_upper(func, **kwargs)\n    else:\n        indices = []\n        for i, y_var in enumerate(self.y_vars):\n            for j, x_var in enumerate(self.x_vars):\n                if x_var != y_var:\n                    indices.append((i, j))\n        self._map_bivariate(func, indices, **kwargs)\n    return self",
    "docstring": "Plot with a bivariate function on the off-diagonal subplots. Parameters ---------- func : callable plotting function Must take x, y arrays as positional arguments and draw onto the \"currently active\" matplotlib Axes. Also needs to accept kwargs called ``.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:map_offdiag arg:self arg:func arguments arg arg arg If Call If Call Assign For Call For Call If Compare Call Call Return return:yes"
  },
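A typical call pattern for the method above, using seaborn's public `PairGrid` API and the bundled penguins example dataset (loading it requires network access on first use):

```python
import matplotlib.pyplot as plt
import seaborn as sns

penguins = sns.load_dataset("penguins")
g = sns.PairGrid(penguins, vars=["bill_length_mm", "bill_depth_mm", "body_mass_g"])
g.map_diag(sns.histplot)        # univariate plots on the diagonal
g.map_offdiag(sns.scatterplot)  # bivariate plots everywhere else
plt.show()
```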
  {
    "library": "tensorflow",
    "name": "_DeviceAttributes",
    "source_code": "class _DeviceAttributes(object):\n\n    def __init__(self, name, device_type, memory_limit_bytes, incarnation):\n        self._name = device.canonical_name(name)\n        self._device_type = device_type\n        self._memory_limit_bytes = memory_limit_bytes\n        self._incarnation = incarnation\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def device_type(self):\n        return self._device_type\n\n    @property\n    def memory_limit_bytes(self):\n        return self._memory_limit_bytes\n\n    @property\n    def incarnation(self):\n        return self._incarnation\n\n    def __repr__(self):\n        return '_DeviceAttributes(%s, %s, %d, %d)' % (self.name, self.device_type, self.memory_limit_bytes, self.incarnation)",
    "docstring": "Struct-like object describing a device's attributes. Each device has 3 key properties: - name: the fully-qualified TensorFlow path to the device. For example: /job:worker/replica:0/task:3/device:CPU:0 - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.) - memory_limit_bytes: the maximum amount of memory available on the device (in bytes).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:_DeviceAttributes FunctionDef name:__init__ arg:self arg:name arg:device_type arg:memory_limit_bytes arg:incarnation arguments arg arg arg arg arg Assign Call Assign Assign Assign FunctionDef name:name arg:self arguments arg Return return:yes FunctionDef name:device_type arg:self arguments arg Return return:yes FunctionDef name:memory_limit_bytes arg:self arguments arg Return return:yes FunctionDef name:incarnation arg:self arguments arg Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "alive",
    "source_code": "def alive(self):\n    return bool(self.__g)",
    "docstring": "does the sprite belong to any groups Sprite.alive(): return bool Returns True when the Sprite belongs to one or more Groups.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:alive arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_custom_objects_by_name",
    "source_code": "def get_custom_objects_by_name(item, custom_objects=None):\n    if item in _GLOBAL_CUSTOM_OBJECTS:\n        return _GLOBAL_CUSTOM_OBJECTS[item]\n    elif custom_objects and item in custom_objects:\n        return custom_objects[item]\n    return None",
    "docstring": "Returns the item if it is in either local or global custom objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:get_custom_objects_by_name arg:item arg:custom_objects arguments arg arg If Compare Return return:yes If BoolOp Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_",
    "source_code": "@impl.register\ndef _(lib: Library, name: str, dispatch_key: str='') -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:\n\n    def wrap(f: Callable[_P, _T]) -> Callable[_P, _T]:\n        lib.impl(name, f, dispatch_key)\n        return f\n    return wrap",
    "docstring": "Legacy torch.library.impl API. Kept around for BC",
    "type": "function",
    "file_path": "pytorch\\torch\\library.py",
    "ast_data": "FunctionDef name:_ arg:lib arg:name arg:dispatch_key arguments arg arg arg FunctionDef name:wrap arg:f arguments arg Call Return return:yes Return return:yes"
  },
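A sketch of the legacy `(lib, name, dispatch_key)` calling convention that this overload keeps working; the `mylib` namespace and the `double_it` op are made up for the demo:

```python
import torch
from torch.library import Library, impl

lib = Library("mylib", "DEF")                # scratch namespace for the demo
lib.define("double_it(Tensor x) -> Tensor")

@impl(lib, "double_it", "CPU")               # legacy (lib, name, key) form
def double_it_cpu(x):
    return x * 2

print(torch.ops.mylib.double_it(torch.ones(3)))  # tensor([2., 2., 2.])
```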
  {
    "library": "pytorch",
    "name": "remove_guards_with_source",
    "source_code": "def remove_guards_with_source(self, source):\n    from ._dynamo.source import is_from_source\n    self.inner = {g for g in self.inner if not is_from_source(g.originating_source, source)}",
    "docstring": "Delete all guards that contains a given source",
    "type": "method",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:remove_guards_with_source arg:self arg:source arguments arg arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "_deprecated",
    "source_code": "def _deprecated(msg, stacklevel=2):\n\n    def wrap(fun):\n        if isinstance(fun, type):\n            warnings.warn(f'Trying to deprecate class {fun!r}', category=RuntimeWarning, stacklevel=2)\n            return fun\n\n        @functools.wraps(fun)\n        def call(*args, **kwargs):\n            warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)\n            return fun(*args, **kwargs)\n        call.__doc__ = fun.__doc__\n        return call\n    return wrap",
    "docstring": "Deprecate a function by emitting a warning on use.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\deprecation.py",
    "ast_data": "FunctionDef name:_deprecated arg:msg arg:stacklevel arguments arg arg FunctionDef name:wrap arg:fun arguments arg If Call Call Return return:yes FunctionDef name:call arguments arg arg Call Return return:yes Call Call Assign Return return:yes Return return:yes"
  },
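The factory above follows the usual warn-then-delegate decorator pattern. A standalone sketch of that pattern (the decorated function and message are made up for illustration):

```python
import functools
import warnings

def deprecated(msg, stacklevel=2):
    def wrap(fun):
        @functools.wraps(fun)
        def call(*args, **kwargs):
            # Emit the warning, then delegate to the original function.
            warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
            return fun(*args, **kwargs)
        return call
    return wrap

@deprecated("`old_mean` is deprecated, use `statistics.mean` instead")
def old_mean(values):
    return sum(values) / len(values)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(old_mean([1, 2, 3]))          # 2.0
    print(caught[0].category.__name__)  # DeprecationWarning
```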
  {
    "library": "pytorch",
    "name": "triton_reshape",
    "source_code": "def triton_reshape(value: str, old_shape: Sequence[sympy.Expr], new_shape: Sequence[sympy.Expr]) -> str:\n    assert isinstance(old_shape, list) and isinstance(new_shape, list)\n    old_shape_str = [V.kernel.index_to_str(shape) for shape in old_shape]\n    new_shape_str = [V.kernel.index_to_str(shape) for shape in new_shape]\n    if old_shape_str == new_shape_str:\n        return value\n    if [s for s in new_shape_str if s != '1'] != old_shape_str:\n        return f'tl.reshape({value}, [{', '.join(new_shape_str)}])'\n    idx = 0\n    expand = []\n    for size in new_shape_str:\n        if idx < len(old_shape_str) and size == old_shape_str[idx]:\n            expand.append(':')\n            idx += 1\n        else:\n            assert size == '1'\n            expand.append('None')\n    assert idx == len(old_shape_str)\n    return f'{value}[{', '.join(expand)}]'",
    "docstring": "Workaround",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:triton_reshape arg:value arg:old_shape arg:new_shape arguments arg arg arg BoolOp Call Call Assign Call Assign Call If Compare Return return:yes If Compare Compare Return return:yes Call Assign Assign For If BoolOp Compare Call Compare Call Compare Call Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_n",
    "source_code": "@dispatch.dispatch_for_api(math_ops.add_n)\ndef add_n(inputs: typing.List[ragged_tensor.RaggedOrDense], name=None):\n    if len(inputs) < 0:\n        raise ValueError('tf.add_n: expected at least one input.')\n    with ops.name_scope(name, 'RaggedAddN', inputs):\n        return ragged_functional_ops.map_flat_values(math_ops.add_n, inputs)",
    "docstring": "RaggedTensor implementation for tf.math.add_n.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:add_n arg:inputs arg:name arguments arg arg If Compare Call Raise Call With Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "I18nTags",
    "source_code": "class I18nTags(Tags):\n\n    def eval_condition(self, condition: Any) -> bool:\n        return True",
    "docstring": "Dummy tags module for I18nBuilder. To ensure that all text inside `` regardless the defined tags.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\builders\\gettext.py",
    "ast_data": "ClassDef name:I18nTags FunctionDef name:eval_condition arg:self arg:condition arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_stepped_value",
    "source_code": "def _stepped_value(self, val):\n    if isinstance(self.valstep, Number):\n        val = self.valmin + round((val - self.valmin) / self.valstep) * self.valstep\n    elif self.valstep is not None:\n        valstep = np.asanyarray(self.valstep)\n        if valstep.ndim != 1:\n            raise ValueError(f'valstep must have 1 dimension but has {valstep.ndim}')\n        val = valstep[np.argmin(np.abs(valstep - val))]\n    return val",
    "docstring": "Return *val* coerced to closest number in the `` grid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_stepped_value arg:self arg:val arguments arg arg If Call Assign Call If Compare Assign Call If Compare Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "translation",
    "source_code": "@property\ndef translation(self) -> Vector3 | Tensor:\n    return self._translation",
    "docstring": "Return the underlying translation vector of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:translation arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "deduce_output_dtype_by_name",
    "source_code": "def deduce_output_dtype_by_name(op_name: str, *args: Any, **kwargs: Any) -> Optional[torch.dtype]:\n    if op_name in boolean_ops():\n        return torch.bool\n    elif op_name in ('to_dtype', 'index_expr'):\n        return kwargs['dtype'] if 'dtype' in kwargs else args[-1]\n    elif op_name in ('rand', 'randn'):\n        return torch.float\n    elif op_name in ('get_index', 'randint64', 'load_seed'):\n        return torch.int64\n    elif op_name == 'reduction':\n        return kwargs['dtype'] if 'dtype' in kwargs else args[1]\n    elif op_name == 'constant':\n        return kwargs['dtype'] if 'dtype' in kwargs else args[-1]\n    elif op_name in ('load', 'store', 'store_reduction'):\n        buf_name = args[1]\n        return V.graph.get_dtype(buf_name)\n    elif op_name == 'to_dtype_bitcast':\n        return kwargs['dtype'] if 'dtype' in kwargs else args[-2]\n    return None",
    "docstring": "Given op name and a list of input dtypes, deduce the output dtype",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:deduce_output_dtype_by_name arg:op_name arguments arg arg arg If Compare Call Return return:yes If Compare Return return:yes Compare If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Compare If Compare Return return:yes Compare If Compare Assign Return return:yes Call If Compare Return return:yes Compare Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_to_tf_type",
    "source_code": "def _to_tf_type(dtype):\n    return dtypes.as_dtype(dtype)",
    "docstring": "Converts a native python or numpy type to TF DType. Args: dtype: Could be a python type, a numpy type or a TF DType. Returns: A tensorflow .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:_to_tf_type arg:dtype arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_supported_filetypes_grouped",
    "source_code": "@classmethod\ndef get_supported_filetypes_grouped(cls):\n    groupings = {}\n    for ext, name in cls.filetypes.items():\n        groupings.setdefault(name, []).append(ext)\n        groupings[name].sort()\n    return groupings",
    "docstring": "Return a dict of savefig file formats supported by this backend, where the keys are a file type name, such as 'Joint Photographic Experts Group', and the values are a list of filename extensions used for that filetype, such as ['jpg', 'jpeg'].",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_supported_filetypes_grouped arg:cls arguments arg Assign For Call Call Call Call Return return:yes"
  },
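Calling the classmethod on a concrete backend canvas; the Agg backend is chosen here only as an example:

```python
from matplotlib.backends.backend_agg import FigureCanvasAgg

# Group supported savefig formats by human-readable file type name.
for name, exts in FigureCanvasAgg.get_supported_filetypes_grouped().items():
    print(f"{name}: {exts}")
# e.g. "Portable Network Graphics: ['png']"
```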
  {
    "library": "pytorch",
    "name": "Tensor_summary",
    "source_code": "def Tensor_summary(valobj: Any, internal_dict: Any, options: Any) -> str:\n    with DisableBreakpoints():\n        target = get_target()\n        tensor = valobj.GetName()\n        result = target.EvaluateExpression(f'torch::gdb::tensor_repr({tensor})')\n        str_result = str(result)\n        target.EvaluateExpression(f'(void)free({result.GetValue()})')\n        str_result = '\\n' + str_result[str_result.find('tensor'):-1]\n        return str_result",
    "docstring": "Print a human readable representation of the given at::Tensor. at::Tensor instances do not have a C++ implementation of a repr method: in pytorch, this is done by pure-Python code. As such, print internally creates a Python wrapper for the given tensor and call repr() on it. Usage: print self",
    "type": "function",
    "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py",
    "ast_data": "FunctionDef name:Tensor_summary arg:valobj arg:internal_dict arg:options arguments arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "path",
    "source_code": "@staticmethod\n@abstractmethod\ndef path(X, y, **kwargs):\n    pass",
    "docstring": "Compute path with coordinate descent.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:path arg:X arg:y arguments arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "Sigmoid",
    "source_code": "class Sigmoid(torch.nn.Sigmoid):\n\n    def __init__(self, output_scale: float, output_zero_point: int):\n        super().__init__()\n        self.output_scale = output_scale\n        self.output_zero_point = output_zero_point\n\n    def forward(self, input):\n        return torch.ops.quantized.sigmoid(input, self.output_scale, self.output_zero_point)\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        output_scale, output_zero_point = mod.activation_post_process.calculate_qparams()\n        return cls(float(output_scale), int(output_zero_point))",
    "docstring": "This is the quantized equivalent of :class:. Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py",
    "ast_data": "ClassDef name:Sigmoid FunctionDef name:__init__ arg:self arg:output_scale arg:output_zero_point arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_CheckOpDeprecation",
    "source_code": "def _CheckOpDeprecation(op_type_name, op_def, producer):\n    deprecation_version = op_def.deprecation.version\n    if deprecation_version and producer >= deprecation_version:\n        raise NotImplementedError(f'Op {op_type_name} is not available in GraphDef version {producer}. It has been removed in version {deprecation_version}. {op_def.deprecation.explanation}.')",
    "docstring": "Checks if the op is deprecated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\op_def_library.py",
    "ast_data": "FunctionDef name:_CheckOpDeprecation arg:op_type_name arg:op_def arg:producer arguments arg arg arg Assign If BoolOp Compare Raise Call"
  },
  {
    "library": "numpy",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return tuple(((stop - start - 1) // step + 1 for start, stop, step in zip(self.start, self.stop, self.step)))",
    "docstring": "The shape of the array to be iterated over. For an example, see .",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_arrayterator_impl.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call Call"
  },
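The property derives each axis length from its (start, stop, step) triple as `(stop - start - 1) // step + 1`. A quick check with the public `numpy.lib.Arrayterator` class:

```python
import numpy as np
from numpy.lib import Arrayterator

a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
it = Arrayterator(a, buf_size=8)
print(it.shape)           # (3, 4, 5)

# Slicing returns a new Arrayterator with adjusted start/stop/step:
# axis 0: (3-1-1)//1 + 1 = 2, axis 1: (4-0-1)//2 + 1 = 2, axis 2: 5.
print(it[1:, ::2].shape)  # (2, 2, 5)
```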
  {
    "library": "tensorflow",
    "name": "_execute_calibration",
    "source_code": "def _execute_calibration(self, calibration_input_fn):\n    for inp in calibration_input_fn():\n        args, kwargs = _convert_to_tensor(inp)\n        self._converted_func(*args, **kwargs)\n    self._for_each_trt_node(self._converted_graph_def, _save_calibration_table)\n    self._converted_func = _construct_function_from_graph_def(self._converted_func, self._converted_graph_def)\n    self._calibrated = True",
    "docstring": "Run INT8 calibration with the provided input generator function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_execute_calibration arg:self arg:calibration_input_fn arguments arg arg For Call Assign Call Call Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "as_proxy",
    "source_code": "def as_proxy(self):\n    return self.__variable.as_proxy()",
    "docstring": "Returns an fx.Proxy (or tuple/list of fx.Proxy) representing this variable in the FX graph we are assembling to pass to the user compiler. This method only works for variables we actually track in the FX graph, aka Tensors (and ints, if you are compiling with dynamic shapes). In particular, if you have a list or tuple of tensors, you will get a list/tuple of proxies (not a single proxy representing the entire list/tuple).",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:as_proxy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "CppOptions",
    "source_code": "class CppOptions(BuildOptionsBase):\n\n    def __init__(self, compile_only: bool=False, warning_all: bool=True, extra_flags: Sequence[str]=(), use_relative_path: bool=False, compiler: str='', min_optimize: bool=False, precompiling: bool=False, preprocessing: bool=False) -> None:\n        super().__init__(compile_only=compile_only, use_relative_path=use_relative_path, precompiling=precompiling, preprocessing=preprocessing)\n        self._compiler = compiler if compiler else get_cpp_compiler()\n        definitions, include_dirs, cflags, ldflags, libraries_dirs, libraries, passthrough_args = get_cpp_options(cpp_compiler=self._compiler, do_link=not (compile_only or precompiling or preprocessing), extra_flags=extra_flags, warning_all=warning_all, min_optimize=min_optimize)\n        _append_list(self._definitions, definitions)\n        _append_list(self._include_dirs, include_dirs)\n        _append_list(self._cflags, cflags)\n        _append_list(self._ldflags, ldflags)\n        _append_list(self._libraries_dirs, libraries_dirs)\n        _append_list(self._libraries, libraries)\n        _append_list(self._passthrough_args, passthrough_args)\n        self._finalize_options()",
    "docstring": "This class is inherited from BuildOptionsBase, and as cxx build options. This option need contains basic cxx build option, which contains: 1. OS related args. 2. Toolchains related args. 3. Cxx standard related args. Note: 1. This Options is good for assist modules build, such as x86_isa_help.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\cpp_builder.py",
    "ast_data": "ClassDef name:CppOptions FunctionDef name:__init__ arg:self arg:compile_only arg:warning_all arg:extra_flags arg:use_relative_path arg:compiler arg:min_optimize arg:precompiling arg:preprocessing arguments arg arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call BoolOp Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_padded_unsharded_flat_param",
    "source_code": "def _get_padded_unsharded_flat_param(self) -> torch.Tensor:\n    self._check_sharded_strategy()\n    flat_param = self.flat_param\n    if self._force_full_precision and self._uses_param_mixed_precision:\n        unsharded_flat_param = flat_param._full_prec_full_param_padded\n        _p_assert(unsharded_flat_param.dtype != self._fwd_bwd_param_dtype, f'Expects full precision but got {self._fwd_bwd_param_dtype}')\n        if flat_param._full_param_padded.untyped_storage().size() > 0:\n            _free_storage(flat_param._full_param_padded)\n    else:\n        unsharded_flat_param = flat_param._full_param_padded\n    return unsharded_flat_param",
    "docstring": "Return a reference to the padded unsharded flat parameter depending on the calling context. This should only be called if using a sharded strategy.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_get_padded_unsharded_flat_param arg:self arguments arg Call Assign If BoolOp Assign Call Compare If Compare Call Call Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "blend_image",
    "source_code": "def blend_image(self, src_img: Tensor, dst_img: Tensor, mask: Tensor) -> Tensor:\n    out: Tensor\n    if self.blending_method == 'naive':\n        out = where(mask == 1, src_img, dst_img)\n    else:\n        raise NotImplementedError(f'Unsupported blending method {self.blending_method}. Use `naive`.')\n    return out",
    "docstring": "Blend two images together.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\image_stitching.py",
    "ast_data": "FunctionDef name:blend_image arg:self arg:src_img arg:dst_img arg:mask arguments arg arg arg arg If Compare Assign Call Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "serialize_state",
    "source_code": "def serialize_state(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_serialize_state(self._reader_ref, name=name)",
    "docstring": "Produce a string tensor that encodes the state of a reader. Not all Readers support being serialized, so this can produce an Unimplemented error. Args: name: A name for the operation (optional). Returns: A string Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:serialize_state arg:self arg:name arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_version_to_int",
    "source_code": "def convert_version_to_int(version):\n    version = version.split('-')[0]\n    version_segments = version.split('.')\n    if len(version_segments) == 2:\n        version_segments.append('0')\n    for seg in version_segments:\n        if not seg.isdigit():\n            return None\n    version_str = ''.join(['%03d' % int(seg) for seg in version_segments])\n    return int(version_str)",
    "docstring": "Convert a version number to a integer that can be used to compare. Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The 'xxxxx' part, for instance 'homebrew' on OS/X, is ignored. Args: version: a version to be converted Returns: An integer if converted successfully, otherwise return None.",
    "type": "function",
    "file_path": "tensorflow\\configure.py",
    "ast_data": "FunctionDef name:convert_version_to_int arg:version arguments arg Assign Call Assign Call If Compare Call Call For If Call Return return:no Assign Call Call Return return:yes Call"
  },
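Each segment is zero-padded to three digits, so the resulting integers order the same way the versions do. A worked check (the logic is copied from the function above; inputs are illustrative):

```python
def convert_version_to_int(version):
    version = version.split('-')[0]           # drop suffixes like '-homebrew'
    segments = version.split('.')
    if len(segments) == 2:                    # X.Y -> X.Y.0
        segments.append('0')
    if not all(seg.isdigit() for seg in segments):
        return None
    return int(''.join('%03d' % int(seg) for seg in segments))

assert convert_version_to_int('2.5') == 2005000
assert convert_version_to_int('1.13.1-homebrew') == 1013001
assert convert_version_to_int('2.5') < convert_version_to_int('2.15.0')
assert convert_version_to_int('not-a-version') is None
```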
  {
    "library": "tensorflow",
    "name": "_Marker",
    "source_code": "class _Marker(object):\n    __slots__ = ['_s']\n\n    def __init__(self, s):\n        self._s = s\n\n    def __repr__(self):\n        return str(self._s)",
    "docstring": "Markers used to pretty-print nested args in function signatures.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "ClassDef name:_Marker Assign FunctionDef name:__init__ arg:self arg:s arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "get_matrix",
    "source_code": "def get_matrix(self):\n    raise NotImplementedError('The method ``get_matrix(p)`` is not implemented.')",
    "docstring": "Return current internal matrix. Returns ------- H : ndarray, shape (n, n) Dense matrix containing either the Hessian or its inverse (depending on how 'approx_type' is defined).",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py",
    "ast_data": "FunctionDef name:get_matrix arg:self arguments arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "readonly",
    "source_code": "@property\ndef readonly(self):\n    return self._readonly",
    "docstring": "if the is read-only.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:readonly arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_option",
    "source_code": "def create_option(name, ty, docstring, default_factory=lambda: None):\n\n    def get_fn(option):\n        if name not in option._options:\n            option._options[name] = default_factory()\n        return option._options.get(name)\n\n    def set_fn(option, value):\n        if not isinstance(value, ty):\n            raise TypeError('Property \"{}\" must be of type {}, got: {} (type: {})'.format(name, ty, value, type(value)))\n        option._options[name] = value\n    return property(get_fn, set_fn, None, docstring)",
    "docstring": "Creates a type-checked property. Args: name: The name to use. ty: The type to use. The type of the property will be validated when it is set. docstring: The docstring to use. default_factory: A callable that takes no arguments and returns a default value to use if not set. Returns: A type-checked property.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\options.py",
    "ast_data": "FunctionDef name:create_option arg:name arg:ty arg:docstring arg:default_factory arguments arg arg arg arg arguments FunctionDef name:get_fn arg:option arguments arg If Compare Assign Call Return return:yes Call FunctionDef name:set_fn arg:option arg:value arguments arg arg If Call Raise Call Call Call Assign Return return:yes Call"
  },
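A sketch of how such a property factory gets wired onto an options class; `DemoOptions` and the `deterministic` option are made up, and the factory body mirrors the one above:

```python
def create_option(name, ty, docstring, default_factory=lambda: None):
    def get_fn(option):
        # Lazily populate the backing dict on first read.
        if name not in option._options:
            option._options[name] = default_factory()
        return option._options.get(name)

    def set_fn(option, value):
        # Validate the type on every assignment.
        if not isinstance(value, ty):
            raise TypeError(f'Property "{name}" must be of type {ty}, got: {value!r}')
        option._options[name] = value

    return property(get_fn, set_fn, None, docstring)

class DemoOptions:
    def __init__(self):
        self._options = {}

    deterministic = create_option(
        'deterministic', bool, 'Whether to force deterministic ordering.',
        default_factory=lambda: False)

opts = DemoOptions()
print(opts.deterministic)       # False, from default_factory
opts.deterministic = True       # type-checked assignment
try:
    opts.deterministic = 'yes'  # wrong type
except TypeError as exc:
    print(exc)
```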
  {
    "library": "matplotlib",
    "name": "_get_data",
    "source_code": "def _get_data(self, event):\n    if event.xdata is None:\n        return (None, None)\n    xdata, ydata = self._get_data_coords(event)\n    xdata = np.clip(xdata, *self.ax.get_xbound())\n    ydata = np.clip(ydata, *self.ax.get_ybound())\n    return (xdata, ydata)",
    "docstring": "Get the xdata and ydata for event, with limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_get_data arg:self arg:event arguments arg arg If Compare Return return:no Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_skip_serialization",
    "source_code": "def should_skip_serialization(layer):\n    saved_model_input_spec_set = isinstance(layer, training_lib.Model) and layer._saved_model_inputs_spec is not None\n    if not layer.built and (not saved_model_input_spec_set):\n        logging.warning('Skipping full serialization of Keras layer {}, because it is not built.'.format(layer))\n        return True\n    return False",
    "docstring": "Skip serializing extra objects and functions if layer inputs aren't set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:should_skip_serialization arg:layer arguments arg Assign BoolOp Call Compare If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bbox, x0=None, y0=None, x1=None, y1=None, **kwargs):\n    _api.check_isinstance(BboxBase, bbox=bbox)\n    super().__init__(**kwargs)\n    self._bbox = bbox\n    self.set_children(bbox)\n    self._points = None\n    fp = [x0, y0, x1, y1]\n    mask = [val is None for val in fp]\n    self._locked_points = np.ma.array(fp, float, mask=mask).reshape((2, 2))",
    "docstring": "Parameters ---------- bbox : The child bounding box to wrap. x0 : float or None The locked value for x0, or None to leave unlocked. y0 : float or None The locked value for y0, or None to leave unlocked. x1 : float or None The locked value for x1, or None to leave unlocked. y1 : float or None The locked value for y1, or None to leave unlocked.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bbox arg:x0 arg:y0 arg:x1 arg:y1 arguments arg arg arg arg arg arg arg Call Call Call Assign Call Assign Assign Assign Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "join_process_group",
    "source_code": "@property\n@abstractmethod\ndef join_process_group(self) -> Any:\n    ...",
    "docstring": "Returns the process group for the collective communications needed by the join context manager itself.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:join_process_group arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "set_multialignment",
    "source_code": "def set_multialignment(self, align):\n    _api.check_in_list(['center', 'right', 'left'], align=align)\n    self._multialignment = align\n    self.stale = True",
    "docstring": "Set the text alignment for multiline texts. The layout of the bounding box of all the lines is determined by the horizontalalignment and verticalalignment properties. This property controls the alignment of the text lines within that box. Parameters ---------- align : {'left', 'right', 'center'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_multialignment arg:self arg:align arguments arg arg Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_VirtualizedSerializerContextManager",
    "source_code": "class _VirtualizedSerializerContextManager(contextlib.ExitStack):\n\n    def __init__(self, virtualized: _VirtualizedSerializer) -> None:\n        super().__init__()\n        self.virtualized = virtualized\n\n    @override\n    def __enter__(self) -> Self:\n        super().__enter__()\n        for set_name in dir(V):\n            if not set_name.startswith('set_'):\n                continue\n            name = set_name[4:]\n            name = name.removesuffix('_handler')\n            set_handler = getattr(V, set_name)\n            if hasattr(self.virtualized, name):\n                value = getattr(self.virtualized, name)\n            else:\n                value = torch._inductor.virtualized._PoisonedVirtual\n            self.enter_context(set_handler(value))\n        return self",
    "docstring": "Helper for _VirtualizedSerializer.patch()",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_VirtualizedSerializerContextManager FunctionDef name:__init__ arg:self arg:virtualized arguments arg arg Call Call Assign FunctionDef name:__enter__ arg:self arguments arg Call Call For Call If Call Assign Assign Call Assign Call If Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_model_ready_for_local_init",
    "source_code": "def _model_ready_for_local_init(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n    return _ready(self._ready_for_local_init_op, sess, 'Model not ready for local init')",
    "docstring": "Checks if the model is ready to run local_init_op. Args: sess: A . Returns: A tuple (is_ready, msg), where is_ready is True if ready to run local_init_op and False otherwise, and msg is if the model is ready to run local_init_op, a with the reason why it is not ready otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "FunctionDef name:_model_ready_for_local_init arg:self arg:sess arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "transform_get_item",
    "source_code": "@register_transformation_rule(GetItem)\ndef transform_get_item(constraint, counter):\n    dims, counter = gen_tensor_dims(constraint.tensor_size, counter)\n    nat_constraints = gen_nat_constraints(dims)\n    is_valid_index = valid_index(constraint.index, dims)\n    all_constraints = [BinConstraintT(constraint.input_var, TensorType(dims), op_eq), *nat_constraints, is_valid_index]\n    if is_valid_index == T():\n        all_constraints.append(BinConstraintD(constraint.res, dims[constraint.index], op_eq))\n    return (Conj(all_constraints), counter)",
    "docstring": "generate an equality of the form: t = [a1, ..., an] then generate constraints that check if the given index is valid given this particular tensor size. If the index is valid, generate a constraint to get the item Note that we already handled the Dyn input case in the previous step. Args: constraint: GetItem which assumes we are getting an item from a tensor (not Dyn) counter: variable tracking Returns: simplified constraints for GetItem",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:transform_get_item arg:constraint arg:counter arguments arg arg Assign Call Assign Call Assign Call Assign Call Call If Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "safe_name",
    "source_code": "def safe_name(name):\n    return '_'.join(re.split('[@/-]', name)).upper()",
    "docstring": "Make *name* safe to use as a JavaScript variable name.",
    "type": "function",
    "file_path": "matplotlib\\tools\\embed_js.py",
    "ast_data": "FunctionDef name:safe_name arg:name arguments arg Return return:yes Call Call Call"
  },
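A quick standalone check of the substitution rule: '@', '/', and '-' all become underscores, then the result is uppercased (inputs are illustrative):

```python
import re

def safe_name(name):
    # Split on any of @, /, - and rejoin with underscores.
    return '_'.join(re.split('[@/-]', name)).upper()

print(safe_name('@jquery/ui-core'))  # _JQUERY_UI_CORE
print(safe_name('mpl-data'))         # MPL_DATA
```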
  {
    "library": "kornia",
    "name": "xyz_to_rgb",
    "source_code": "def xyz_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    x: Tensor = image[..., 0, :, :]\n    y: Tensor = image[..., 1, :, :]\n    z: Tensor = image[..., 2, :, :]\n    r: Tensor = 3.2404813432005266 * x + -1.5371515162713185 * y + -0.4985363261688878 * z\n    g: Tensor = -0.9692549499965682 * x + 1.8759900014898907 * y + 0.0415559265582928 * z\n    b: Tensor = 0.0556466391351772 * x + -0.2040413383665112 * y + 1.0573110696453443 * z\n    out: Tensor = torch.stack([r, g, b], dim=-3)\n    return out",
    "docstring": "Convert a XYZ image to RGB. Args: image: XYZ Image to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = xyz_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\xyz.py",
    "ast_data": "FunctionDef name:xyz_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "logout",
    "source_code": "def logout(request):\n    user = getattr(request, 'user', None)\n    if not getattr(user, 'is_authenticated', True):\n        user = None\n    user_logged_out.send(sender=user.__class__, request=request, user=user)\n    request.session.flush()\n    if hasattr(request, 'user'):\n        from django.contrib.auth.models import AnonymousUser\n        request.user = AnonymousUser()",
    "docstring": "Remove the authenticated user's ID from the request and flush their session data.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\__init__.py",
    "ast_data": "FunctionDef name:logout arg:request arguments arg Assign Call If Call Assign Call Call If Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "islower",
    "source_code": "def islower(self):\n    return islower(self)",
    "docstring": "Returns true for each element if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. See Also -------- char.islower",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:islower arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "minkowski_distance_p",
    "source_code": "def minkowski_distance_p(x, y, p=2):\n    x = np.asarray(x)\n    y = np.asarray(y)\n    common_datatype = np.promote_types(np.promote_types(x.dtype, y.dtype), 'float64')\n    x = x.astype(common_datatype)\n    y = y.astype(common_datatype)\n    if p == np.inf:\n        return np.amax(np.abs(y - x), axis=-1)\n    elif p == 1:\n        return np.sum(np.abs(y - x), axis=-1)\n    else:\n        return np.sum(np.abs(y - x) ** p, axis=-1)",
    "docstring": "Compute the pth power of the L**p distance between two arrays. For efficiency, this function computes the L**p distance but does not extract the pth root. If is 1 or infinity, this is equal to the actual L**p distance. The last dimensions of and must be the same length. Any other dimensions must be compatible for broadcasting. Parameters ---------- x : (..., K) array_like Input array. y : (..., K) array_like Input array. p : float, 1 >> from scipy.spatial import minkowski_distance_p >>> minkowski_distance_p([[0, 0], [0, 0]], [[1, 1], [0, 1]]) array([2., 1.])",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:minkowski_distance_p arg:x arg:y arg:p arguments arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
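Usage through the public `scipy.spatial` imports, showing how the pth root is deliberately skipped relative to `minkowski_distance`:

```python
import numpy as np
from scipy.spatial import minkowski_distance, minkowski_distance_p

x = [[0, 0], [0, 0]]
y = [[1, 1], [0, 1]]
print(minkowski_distance_p(x, y))            # [2. 1.]        squared L2, no sqrt
print(minkowski_distance(x, y))              # [1.414... 1.]  actual L2 distance
print(minkowski_distance_p(x, y, p=np.inf))  # [1. 1.]        max |y - x|
```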
  {
    "library": "pandas",
    "name": "_get_delete_freq",
    "source_code": "def _get_delete_freq(self, loc: int | slice | Sequence[int]):\n    freq = None\n    if self.freq is not None:\n        if is_integer(loc):\n            if loc in (0, -len(self), -1, len(self) - 1):\n                freq = self.freq\n        else:\n            if is_list_like(loc):\n                loc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))\n            if isinstance(loc, slice) and loc.step in (1, None):\n                if loc.start in (0, None) or loc.stop in (len(self), None):\n                    freq = self.freq\n    return freq",
    "docstring": "Find the for self.delete(loc).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py",
    "ast_data": "FunctionDef name:_get_delete_freq arg:self arg:loc arguments arg arg Assign If Compare If Call If Compare Call Call Assign If Call Assign Call Call Call If BoolOp Call Compare If BoolOp Compare Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "getmethodclass",
    "source_code": "def getmethodclass(m):\n    if not hasattr(m, '__name__') and hasattr(m, '__class__') and hasattr(m, '__call__'):\n        if isinstance(m.__class__, type):\n            return m.__class__\n    m_self = getattr(m, '__self__', None)\n    if m_self is not None:\n        if inspect.isclass(m_self):\n            return m_self\n        return m_self.__class__\n    owners = []\n    caller_frame = tf_inspect.currentframe().f_back\n    try:\n        for v in itertools.chain(caller_frame.f_locals.values(), caller_frame.f_globals.values()):\n            if hasattr(v, m.__name__):\n                candidate = getattr(v, m.__name__)\n                if hasattr(candidate, 'im_func'):\n                    candidate = candidate.im_func\n                if hasattr(m, 'im_func'):\n                    m = m.im_func\n                if candidate is m:\n                    owners.append(v)\n    finally:\n        del caller_frame\n    if owners:\n        if len(owners) == 1:\n            return owners[0]\n        owner_types = tuple((o if tf_inspect.isclass(o) else type(o) for o in owners))\n        for o in owner_types:\n            if tf_inspect.isclass(o) and issubclass(o, tuple(owner_types)):\n                return o\n        raise ValueError('Found too many owners of %s: %s' % (m, owners))\n    return None",
    "docstring": "Resolves a function's owner, e.g. a method's class. Note that this returns the object that the function was retrieved from, not necessarily the class where it was defined. This function relies on Python stack frame support in the interpreter, and has the same limitations that inspect.currentframe. Limitations. This function will only work correctly if the owned class is visible in the caller's global or local variables. Args: m: A user defined function Returns: The class that this function was retrieved from, or None if the function is not an object or class method, or the class that owns the object or method is not visible to m. Raises: ValueError: if the class could not be resolved for any unexpected reason.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\inspect_utils.py",
    "ast_data": "FunctionDef name:getmethodclass arg:m arguments arg If BoolOp Call Call Call If Call Return return:yes Assign Call If Compare If Call Return return:yes Return return:yes Assign Assign Call Try For Call Call Call If Call Assign Call If Call Assign If Call Assign If Compare Call If If Compare Call Return return:yes Assign Call Call Call For If BoolOp Call Call Call Return return:yes Raise Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> Sequence[Any]:\n    with self._pytree_extension_context:\n        return super().apply(model_outputs, model=model)",
    "docstring": "Flatten the model outputs, under the context of pytree extension.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\dynamo_graph_extractor.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_outputs arg:model arguments arg arg arg With Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_unpack_quantized_tensor",
    "source_code": "def _unpack_quantized_tensor(tuple_value: _C.Value) -> tuple[_C.Value, ...]:\n    tuple_node = tuple_value.node()\n    if not _is_tuple_construct(tuple_value):\n        raise errors.SymbolicValueError(f'ONNX symbolic expected the output of `{tuple_node}` to be a quantized tensor. Is this likely due to missing support for quantized `{tuple_node.kind()}`. Please create an issue on {_constants.PYTORCH_GITHUB_ISSUES_URL}', tuple_value)\n    unpacked = tuple(tuple_node.inputs())\n    assert len(unpacked) == 3 or len(unpacked) == 4\n    return unpacked",
    "docstring": "Unpacks a quantized tensor into a tuple of tensor and scale/zero_point. Args: tuple_value: A tuple of tensor, scale, zero_point, and optionally axis. Returns: A tuple of tensor, scale, zero_point, and optionally axis.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_unpack_quantized_tensor arg:tuple_value arguments arg Assign Call If Call Raise Call Call Assign Call Call BoolOp Compare Call Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_VariableTransform",
    "source_code": "class _VariableTransform:\n\n    @property\n    def transformed_limits(self):\n        raise NotImplementedError\n\n    @property\n    def points(self):\n        return []\n\n    def inv(self, x):\n        raise NotImplementedError\n\n    def __call__(self, t, *args, **kwargs):\n        raise NotImplementedError",
    "docstring": "A transformation that can be applied to an integral.",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_cubature.py",
    "ast_data": "ClassDef name:_VariableTransform FunctionDef name:transformed_limits arg:self arguments arg Raise FunctionDef name:points arg:self arguments arg Return return:no FunctionDef name:inv arg:self arg:x arguments arg arg Raise FunctionDef name:__call__ arg:self arg:t arguments arg arg arg arg Raise"
  },
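To make the abstract interface concrete, here is a hypothetical subclass sketch; the linear change of variables and the meaning given to ``__call__`` (mapping the transformed variable back to the original one) are illustrative assumptions, not scipy's actual transform implementations::

    import numpy as np

    class _AffineTransform(_VariableTransform):
        # Hypothetical: map t in [-1, 1] onto x in [a, b].
        def __init__(self, a, b):
            self.a, self.b = a, b

        @property
        def transformed_limits(self):
            return np.array([-1.0]), np.array([1.0])

        def inv(self, x):
            # Inverse map: x in [a, b] back to t in [-1, 1].
            return (2.0 * x - (self.a + self.b)) / (self.b - self.a)

        def __call__(self, t, *args, **kwargs):
            # Forward map: t in [-1, 1] to x in [a, b].
            return 0.5 * (self.b - self.a) * t + 0.5 * (self.a + self.b)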
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, x_orig):\n    if x_orig.numel() == 0:\n        return x_orig\n    x = x_orig.detach()\n    x = x.to(self.min_val.dtype)\n    min_val_cur, max_val_cur = torch.aminmax(x)\n    min_val = torch.min(min_val_cur, self.min_val)\n    max_val = torch.max(max_val_cur, self.max_val)\n    self.min_val.copy_(min_val)\n    self.max_val.copy_(max_val)\n    return x_orig",
    "docstring": "Records the running minimum and maximum of ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:x_orig arguments arg arg If Compare Call Return return:yes Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
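The same running min/max pattern, reduced to a self-contained sketch (``RunningMinMax`` is a hypothetical stand-in for the observer above)::

    import torch

    class RunningMinMax(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("min_val", torch.tensor(float("inf")))
            self.register_buffer("max_val", torch.tensor(float("-inf")))

        def forward(self, x):
            if x.numel():  # empty inputs leave the state untouched
                cur_min, cur_max = torch.aminmax(x.detach().float())
                self.min_val.copy_(torch.min(cur_min, self.min_val))
                self.max_val.copy_(torch.max(cur_max, self.max_val))
            return x  # pass-through, like the observer above

    obs = RunningMinMax()
    obs(torch.tensor([1.0, -3.0, 2.0]))
    obs(torch.tensor([5.0]))
    assert obs.min_val.item() == -3.0 and obs.max_val.item() == 5.0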
  {
    "library": "pytorch",
    "name": "get_parameter",
    "source_code": "def get_parameter(self, target: str) -> 'Parameter':\n    module_path, _, param_name = target.rpartition('.')\n    mod: torch.nn.Module = self.get_submodule(module_path)\n    if not hasattr(mod, param_name):\n        raise AttributeError(mod._get_name() + ' has no attribute `' + param_name + '`')\n    param: torch.nn.Parameter = getattr(mod, param_name)\n    if not isinstance(param, torch.nn.Parameter):\n        raise AttributeError('`' + param_name + '` is not an nn.Parameter')\n    return param",
    "docstring": "Return the parameter given by ``",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:get_parameter arg:self arg:target arguments arg arg Assign Call Call If Call Raise Call Call Call If Call Raise Call Return return:yes"
  },
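Usage sketch via the public ``nn.Module.get_parameter`` API::

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(4, 2))
    # Dotted path: submodule "0", then its parameter "weight".
    w = model.get_parameter("0.weight")
    assert isinstance(w, torch.nn.Parameter)
    # A missing name, or an attribute that is not an nn.Parameter,
    # raises AttributeError, as the code above shows.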
  {
    "library": "matplotlib",
    "name": "ArtistAnimation",
    "source_code": "class ArtistAnimation(TimedAnimation):\n\n    def __init__(self, fig, artists, *args, **kwargs):\n        self._drawn_artists = []\n        self._framedata = artists\n        super().__init__(fig, *args, **kwargs)\n\n    def _init_draw(self):\n        super()._init_draw()\n        figs = set()\n        for f in self.new_frame_seq():\n            for artist in f:\n                artist.set_visible(False)\n                artist.set_animated(self._blit)\n                if artist.get_figure() not in figs:\n                    figs.add(artist.get_figure())\n        for fig in figs:\n            fig.canvas.draw_idle()\n\n    def _pre_draw(self, framedata, blit):\n        if blit:\n            self._blit_clear(self._drawn_artists)\n        else:\n            for artist in self._drawn_artists:\n                artist.set_visible(False)\n\n    def _draw_frame(self, artists):\n        self._drawn_artists = artists\n        for artist in artists:\n            artist.set_visible(True)",
    "docstring": "subclass that creates an animation by using a fixed set of objects. Before creating an instance, all plotting should have taken place and the relevant artists saved. .. note:: You must store the created Animation in a variable that lives as long as the animation should run. Otherwise, the Animation object will be garbage-collected and the animation stops. Parameters ---------- fig : The figure object used to get needed events, such as draw or resize. artists : list Each list entry is a collection of objects that are made visible on the corresponding frame. Other artists are made invisible. interval : int, default: 200 Delay between frames in milliseconds. repeat_delay : int, default: 0 The delay in milliseconds between consecutive animation runs, if *repeat* is True. repeat : bool, default: True Whether the animation repeats when the sequence of frames is completed. blit : bool, default: False Whether blitting is used to optimize drawing.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "ClassDef name:ArtistAnimation FunctionDef name:__init__ arg:self arg:fig arg:artists arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:_init_draw arg:self arguments arg Call Call Assign Call For Call For Call Call If Compare Call Call Call For Call FunctionDef name:_pre_draw arg:self arg:framedata arg:blit arguments arg arg arg If Call For Call FunctionDef name:_draw_frame arg:self arg:artists arguments arg arg Assign For Call"
  },
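A minimal end-to-end usage sketch: all artists are created up front, one list per frame::

    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    fig, ax = plt.subplots()
    frames = []
    for i in range(3):
        # Each frame is a collection of artists made visible together.
        (line,) = ax.plot(range(5), [v * (i + 1) for v in range(5)], "b-")
        frames.append([line])
    # Keep a reference, or the animation is garbage-collected and stops.
    ani = animation.ArtistAnimation(fig, frames, interval=200, blit=True)
    plt.show()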
  {
    "library": "django",
    "name": "get_func_full_args",
    "source_code": "def get_func_full_args(func):\n    params = _get_callable_parameters(func)\n    args = []\n    for param in params:\n        name = param.name\n        if name == 'self':\n            continue\n        if param.kind == inspect.Parameter.VAR_POSITIONAL:\n            name = '*' + name\n        elif param.kind == inspect.Parameter.VAR_KEYWORD:\n            name = '**' + name\n        if param.default != inspect.Parameter.empty:\n            args.append((name, param.default))\n        else:\n            args.append((name,))\n    return args",
    "docstring": "Return a list of (argument name, default value) tuples. If the argument does not have a default value, omit it in the tuple. Arguments such as *args and **kwargs are also included.",
    "type": "function",
    "file_path": "django\\django\\utils\\inspect.py",
    "ast_data": "FunctionDef name:get_func_full_args arg:func arguments arg Assign Call Assign For Assign If Compare If Compare Assign If Compare Assign If Compare Call Call Return return:yes"
  },
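Behaviour sketch, importing from the path listed above (an internal Django utility, so the import is not a stable public API)::

    from django.utils.inspect import get_func_full_args

    def demo(a, b=1, *args, **kwargs):
        pass

    # Defaults kept when present; *args/**kwargs included; 'self' skipped.
    assert get_func_full_args(demo) == [
        ("a",), ("b", 1), ("*args",), ("**kwargs",)
    ]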
  {
    "library": "django",
    "name": "form_invalid",
    "source_code": "def form_invalid(self, form):\n    return self.render_to_response(self.get_context_data(form=form))",
    "docstring": "If the form is invalid, render the invalid form.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\edit.py",
    "ast_data": "FunctionDef name:form_invalid arg:self arg:form arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create_signature_map",
    "source_code": "def create_signature_map(signatures):\n    signature_map = _SignatureMap()\n    for name, func in signatures.items():\n        assert isinstance(func, defun.ConcreteFunction)\n        assert isinstance(func.structured_outputs, collections_abc.Mapping)\n        if len(func._arg_keywords) == 1:\n            assert 1 == func._num_positional_args\n        else:\n            assert 0 == func._num_positional_args\n        signature_map._add_signature(name, func)\n    return signature_map",
    "docstring": "Creates an object containing .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\signature_serialization.py",
    "ast_data": "FunctionDef name:create_signature_map arg:signatures arguments arg Assign Call For Call Call Call If Compare Call Compare Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_wrap_and_check_metrics",
    "source_code": "def _wrap_and_check_metrics(self, metrics):\n    if not isinstance(metrics, dict):\n        metrics = {self.METRICS_NAME: metrics}\n    outputs = {}\n    for key, value in metrics.items():\n        if isinstance(value, tuple):\n            metric_val, metric_op = value\n        else:\n            metric_val = value.result()\n            assert len(value.updates) == 1\n            metric_op = value.updates[0]\n        key = self._check_output_key(key, self.METRICS_NAME)\n        key = self._prefix_key(key, self.METRICS_NAME)\n        val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX\n        op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX\n        if not isinstance(metric_val, tensor.Tensor):\n            raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))\n        if not (tensor_util.is_tf_type(metric_op) or isinstance(metric_op, ops.Operation)):\n            raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))\n        metric_op_tensor = metric_op\n        if not isinstance(metric_op, tensor.Tensor):\n            with ops.control_dependencies([metric_op]):\n                metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')\n        outputs[val_name] = metric_val\n        outputs[op_name] = metric_op_tensor\n    return outputs",
    "docstring": "Handle the saving of metrics. Metrics is either a tuple of (value, update_op), or a dict of such tuples. Here, we separate out the tuples and create a dict with names to tensors. Args: metrics: Dict of metric results keyed by name. The values of the dict can be one of the following: (1) instance of class. (2) (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op. Returns: dict of output_names to tensors Raises: ValueError: if the dict key is not a string, or the metric values or ops are not tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:_wrap_and_check_metrics arg:self arg:metrics arguments arg arg If Call Assign Assign For Call If Call Assign Assign Call Compare Call Assign Assign Call Assign Call Assign Assign If Call Raise Call Call If BoolOp Call Call Raise Call Call Assign If Call With Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "lookup",
    "source_code": "def lookup(self, name):\n    name = compat.as_str(name)\n    if name in self._registry:\n        return self._registry[name][_TYPE_TAG]\n    else:\n        raise LookupError('%s registry has no entry for: %s' % (self._name, name))",
    "docstring": "Looks up \"name\". Args: name: a string specifying the registry key for the candidate. Returns: Registered object if found Raises: LookupError: if \"name\" has not been registered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\registry.py",
    "ast_data": "FunctionDef name:lookup arg:self arg:name arguments arg arg Assign Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_merge_bw",
    "source_code": "def _merge_bw(compute_actions: list[Optional[_Action]]) -> list[_Action]:\n    merged_actions = []\n    while compute_actions:\n        action = compute_actions.pop(0)\n        if action is None:\n            continue\n        while len(compute_actions) and (next_action := compute_actions[0]) is None:\n            compute_actions.pop(0)\n        if action.computation_type == BACKWARD_INPUT and next_action is not None and (next_action.computation_type == BACKWARD_WEIGHT) and (action.stage_index == next_action.stage_index) and (action.microbatch_index == next_action.microbatch_index):\n            merged_actions.append(_Action(action.stage_index, FULL_BACKWARD, action.microbatch_index))\n            compute_actions.pop(0)\n        else:\n            merged_actions.append(action)\n    return merged_actions",
    "docstring": "Given a basic schedule involving only compute actions (F,I,W), merge adjacent I and W ops into B ops. (note: I = BACKWARD_INPUT, W = BACKWARD_WEIGHT, B = FULL_BACKWARD) B refers to running the whole backward (not separating grad_input and grad_weight), which can be more efficient in some cases.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:_merge_bw arg:compute_actions arguments arg Assign While Assign Call If Compare While BoolOp Call Compare Call If BoolOp Compare Compare Compare Compare Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "to_filehandle",
    "source_code": "def to_filehandle(fname, flag='r', return_opened=False, encoding=None):\n    if isinstance(fname, os.PathLike):\n        fname = os.fspath(fname)\n    if isinstance(fname, str):\n        if fname.endswith('.gz'):\n            fh = gzip.open(fname, flag)\n        elif fname.endswith('.bz2'):\n            import bz2\n            fh = bz2.BZ2File(fname, flag)\n        else:\n            fh = open(fname, flag, encoding=encoding)\n        opened = True\n    elif hasattr(fname, 'seek'):\n        fh = fname\n        opened = False\n    else:\n        raise ValueError('fname must be a PathLike or file handle')\n    if return_opened:\n        return (fh, opened)\n    return fh",
    "docstring": "Convert a path to an open file handle or pass-through a file-like object. Consider using instead, as it allows one to properly close newly created file objects more easily. Parameters ---------- fname : str or path-like or file-like If or , the file is opened using the flags specified by *flag* and *encoding*. If a file-like object, it is passed through. flag : str, default: 'r' Passed as the *mode* argument to when *fname* is or ; ignored if *fname* is file-like. return_opened : bool, default: False If True, return both the file object and a boolean indicating whether this was a new file (that the caller needs to close). If False, return only the new file. encoding : str or None, default: None Passed as the *mode* argument to when *fname* is or ; ignored if *fname* is file-like. Returns ------- fh : file-like opened : bool *opened* is only returned if *return_opened* is True.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:to_filehandle arg:fname arg:flag arg:return_opened arg:encoding arguments arg arg arg arg If Call Assign Call If Call If Call Assign Call If Call Assign Call Assign Call Assign If Call Assign Assign Raise Call If Return return:yes Return return:yes"
  },
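Usage sketch; ``matplotlib.cbook.to_filehandle`` is the import path implied by the file above::

    from matplotlib.cbook import to_filehandle

    fh, opened = to_filehandle(
        "data.csv", flag="w", return_opened=True, encoding="utf-8"
    )
    try:
        fh.write("a,b\n")
    finally:
        if opened:  # only close handles this call opened itself
            fh.close()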
  {
    "library": "tensorflow",
    "name": "pack_eager_tensors",
    "source_code": "def pack_eager_tensors(self, tensors):\n    self.ensure_initialized()\n    return pywrap_tfe.TFE_Py_PackEagerTensors(self._handle, tensors)",
    "docstring": "Pack multiple s of the same dtype and shape. Args: tensors: a list of EagerTensors to pack. Returns: A packed EagerTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:pack_eager_tensors arg:self arg:tensors arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "ymin",
    "source_code": "@property\ndef ymin(self) -> torch.Tensor:\n    return self._data[..., 1]",
    "docstring": "The bounding box top-left y-coordinate.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:ymin arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_init_progbar",
    "source_code": "def _maybe_init_progbar(self):\n    self.stateful_metrics = set(self.stateful_metrics)\n    if self.model:\n        self.stateful_metrics = self.stateful_metrics.union(set((m.name for m in self.model.metrics)))\n    if self.progbar is None:\n        self.progbar = Progbar(target=self.target, verbose=self.verbose, stateful_metrics=self.stateful_metrics, unit_name='step' if self.use_steps else 'sample')\n    self.progbar._update_stateful_metrics(self.stateful_metrics)",
    "docstring": "Instantiate a if not yet, and update the stateful metrics.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_maybe_init_progbar arg:self arguments arg Assign Call If Assign Call Call If Compare Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "resnet50",
    "source_code": "def resnet50(pretrained=False, progress=True, **kwargs):\n    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)",
    "docstring": "ResNet-50 model from _ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py",
    "ast_data": "FunctionDef name:resnet50 arg:pretrained arg:progress arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_frame_on",
    "source_code": "def set_frame_on(self, b):\n    self._frameon = b\n    self.stale = True",
    "docstring": "Set whether the Axes rectangle patch is drawn. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_frame_on arg:self arg:b arguments arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "_block_format_index",
    "source_code": "def _block_format_index(index):\n    idx_str = ''.join((f'[{i}]' for i in index if i is not None))\n    return 'arrays' + idx_str",
    "docstring": "Convert a list of indices ``.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:_block_format_index arg:index arguments arg Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self) -> None:\n    self._stop()",
    "docstring": "Stops the server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while stopping the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_average_path_length",
    "source_code": "def _average_path_length(n_samples_leaf):\n    n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)\n    n_samples_leaf_shape = n_samples_leaf.shape\n    n_samples_leaf = n_samples_leaf.reshape((1, -1))\n    average_path_length = np.zeros(n_samples_leaf.shape)\n    mask_1 = n_samples_leaf <= 1\n    mask_2 = n_samples_leaf == 2\n    not_mask = ~np.logical_or(mask_1, mask_2)\n    average_path_length[mask_1] = 0.0\n    average_path_length[mask_2] = 1.0\n    average_path_length[not_mask] = 2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma) - 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]\n    return average_path_length.reshape(n_samples_leaf_shape)",
    "docstring": "The average path length in a n_samples iTree, which is equal to the average path length of an unsuccessful BST search since the latter has the same structure as an isolation tree. Parameters ---------- n_samples_leaf : array-like of shape (n_samples,) The number of training samples in each test sample leaf, for each estimators. Returns ------- average_path_length : ndarray of shape (n_samples,)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py",
    "ast_data": "FunctionDef name:_average_path_length arg:n_samples_leaf arguments arg Assign Call Assign Assign Call Assign Call Assign Compare Assign Compare Assign Call Assign Assign Assign Call Return return:yes Call"
  },
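The closed form the non-trivial branch implements is c(n) = 2(ln(n-1) + gamma) - 2(n-1)/n, with c(1) = 0 and c(2) = 1 by convention. A sketch checking it against the helper above (the import is a private sklearn path, so treat it as illustrative)::

    import numpy as np
    from sklearn.ensemble._iforest import _average_path_length

    n = 10.0
    expected = 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n
    out = _average_path_length([1, 2, 10])
    # Leaves of size <= 1 contribute 0; size 2 contributes exactly 1.
    assert out[0] == 0.0 and out[1] == 1.0
    assert np.isclose(out[2], expected)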
  {
    "library": "tensorflow",
    "name": "_caching_device",
    "source_code": "def _caching_device(rnn_cell):\n    if context.executing_eagerly():\n        return None\n    if not getattr(rnn_cell, '_enable_caching_device', False):\n        return None\n    if control_flow_util.IsInWhileLoop(ops.get_default_graph()):\n        logging.warning('Variable read device caching has been disabled because the RNN is in tf.while_loop loop context, which will cause reading stalled value in forward path. This could slow down the training due to duplicated variable reads. Please consider updating your code to remove tf.while_loop if possible.')\n        return None\n    if rnn_cell._dtype_policy.compute_dtype != rnn_cell._dtype_policy.variable_dtype:\n        logging.warning(\"Variable read device caching has been disabled since it doesn't work with the mixed precision API. This is likely to cause a slowdown for RNN training due to duplicated read of variable for each timestep, which will be significant in a multi remote worker setting. Please consider disabling mixed precision API if the performance has been affected.\")\n        return None\n    return lambda op: op.device",
    "docstring": "Returns the caching device for the RNN variable. This is useful for distributed training, when variable is not located as same device as the training worker. By enabling the device cache, this allows worker to read the variable once and cache locally, rather than read it every time step from remote when it is needed. Note that this is assuming the variable that cell needs for each time step is having the same value in the forward path, and only gets updated in the backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). If the cell body relies on any variable that gets updated every time step, then caching device will cause it to read the stall value. Args: rnn_cell: the rnn cell instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:_caching_device arg:rnn_cell arguments arg If Call Return return:no If Call Return return:no If Call Call Call Return return:no If Compare Call Return return:no Return return:yes arguments arg"
  },
  {
    "library": "django",
    "name": "semi_major",
    "source_code": "@property\ndef semi_major(self):\n    return capi.semi_major(self.ptr, byref(c_int()))",
    "docstring": "Return the Semi Major Axis for this Spatial Reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:semi_major arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tape_grad_fn",
    "source_code": "def tape_grad_fn(*result_grad_components):\n    result_grads = composite_tensor_gradient.replace_flat_tensors_for_gradients(nest.flatten(result), result_grad_components[:flat_result_len])\n    if not isinstance(result_grads, (list, tuple)):\n        result_grads = [result_grads]\n    if variables:\n        input_grads, variable_grads = grad_fn(*result_grads, variables=variables)\n        if len(variable_grads) != len(variables):\n            raise ValueError('Must return gradient for each variable from @custom_gradient grad_fn.')\n    else:\n        input_grads = grad_fn(*result_grads)\n        variable_grads = []\n    input_grads = composite_tensor_gradient.get_flat_tensors_for_gradients(nest.flatten(input_grads))\n    return [None] * flat_result_len + input_grads + variable_grads",
    "docstring": "Custom grad fn wrapper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\custom_gradient.py",
    "ast_data": "FunctionDef name:tape_grad_fn arguments arg Assign Call Call If Call Assign If Assign Call If Compare Call Call Raise Call Assign Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unpack",
    "source_code": "def unpack(value):\n    if not is_packed(value):\n        return value\n    variant = value._tf_extension_type_packed_variant\n    spec = value._tf_extension_type_cached_type_spec\n    spec = spec._tf_extension_type_with_packed(False)\n    return composite_tensor_ops.composite_tensor_from_variant(variant, spec)",
    "docstring": "Returns a copy of with individual fields stored in __dict__. Args: value: An object. Returns: An object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:unpack arg:value arguments arg If Call Return return:yes Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_check_valid_iterator_id",
    "source_code": "def _check_valid_iterator_id(self, iterator_id) -> bool:\n    return iterator_id == self._valid_iterator_id and iterator_id == self.main_datapipe._valid_iterator_id",
    "docstring": "Check the valid iterator ID against that of DataPipe object and that of .",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "FunctionDef name:_check_valid_iterator_id arg:self arg:iterator_id arguments arg arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "django",
    "name": "prepare_sql_script",
    "source_code": "def prepare_sql_script(self, sql):\n    return [sqlparse.format(statement, strip_comments=True) for statement in sqlparse.split(sql) if statement]",
    "docstring": "Take an SQL script that may contain multiple lines and return a list of statements to feed to successive cursor.execute() calls. Since few databases are able to process raw SQL scripts in a single cursor.execute() call and PEP 249 doesn't talk about this use case, the default implementation is conservative.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:prepare_sql_script arg:self arg:sql arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "ridder",
    "source_code": "def ridder(f, a, b, args=(), xtol=_xtol, rtol=_rtol, maxiter=_iter, full_output=False, disp=True):\n    if not isinstance(args, tuple):\n        args = (args,)\n    maxiter = operator.index(maxiter)\n    if xtol <= 0:\n        raise ValueError(f'xtol too small ({xtol:g} <= 0)')\n    if rtol < _rtol:\n        raise ValueError(f'rtol too small ({rtol:g} < {_rtol:g})')\n    f = _wrap_nan_raise(f)\n    r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)\n    return results_c(full_output, r, 'ridder')",
    "docstring": "Find a root of a function in an interval using Ridder's method. Parameters ---------- f : function Python function returning a number. f must be continuous, and f(a) and f(b) must have opposite signs. a : scalar One end of the bracketing interval [a,b]. b : scalar The other end of the bracketing interval [a,b]. xtol : number, optional The computed root `maxiterfffull_outputfull_outputxrRootResultsRootResultsfabRootResultsfab`abs(x - x0) >> def f(x): ... return (x**2 - 1) >>> from scipy import optimize >>> root = optimize.ridder(f, 0, 2) >>> root 1.0 >>> root = optimize.ridder(f, -2, 0) >>> root -1.0",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:ridder arg:f arg:a arg:b arg:args arg:xtol arg:rtol arg:maxiter arg:full_output arg:disp arguments arg arg arg arg arg arg arg arg arg If Call Assign Assign Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_anchored_bbox",
    "source_code": "def _get_anchored_bbox(loc, bbox, parentbbox, borderpad):\n    c = [None, 'NE', 'NW', 'SW', 'SE', 'E', 'W', 'E', 'S', 'N', 'C'][loc]\n    container = parentbbox.padded(-borderpad)\n    return bbox.anchored(c, container=container).p0",
    "docstring": "Return the (x, y) position of the *bbox* anchored at the *parentbbox* with the *loc* code with the *borderpad*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:_get_anchored_bbox arg:loc arg:bbox arg:parentbbox arg:borderpad arguments arg arg arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "convert_dvi_to_png",
    "source_code": "def convert_dvi_to_png(dvipath: Path, builder: Builder, out_path: Path) -> int | None:\n    name = 'dvipng'\n    command = [builder.config.imgmath_dvipng, '-o', out_path, '-T', 'tight', '-z9']\n    command.extend(builder.config.imgmath_dvipng_args)\n    if builder.config.imgmath_use_preview:\n        command.append('--depth')\n    command.append(dvipath)\n    stdout, _stderr = convert_dvi_to_image(command, name)\n    depth = None\n    if builder.config.imgmath_use_preview:\n        for line in stdout.splitlines():\n            matched = depth_re.match(line)\n            if matched:\n                depth = int(matched.group(1))\n                write_png_depth(out_path, depth)\n                break\n    return depth",
    "docstring": "Convert DVI file to PNG image.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\imgmath.py",
    "ast_data": "FunctionDef name:convert_dvi_to_png arg:dvipath arg:builder arg:out_path arguments arg arg arg Assign Assign Call If Call Call Assign Call Assign If For Call Assign Call If Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__get__",
    "source_code": "def __get__(self, instance, cls=None):\n    if instance is None:\n        return self\n    return self.related_manager_cls(instance)",
    "docstring": "Get the related objects through the reverse relation. With the example above, when getting `` class (unused)",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:instance arg:cls arguments arg arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "compress_group_index",
    "source_code": "def compress_group_index(group_index: npt.NDArray[np.int64], sort: bool=True) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]:\n    if len(group_index) and np.all(group_index[1:] >= group_index[:-1]):\n        unique_mask = np.concatenate([group_index[:1] > -1, group_index[1:] != group_index[:-1]])\n        comp_ids = unique_mask.cumsum()\n        comp_ids -= 1\n        obs_group_ids = group_index[unique_mask]\n    else:\n        size_hint = len(group_index)\n        table = hashtable.Int64HashTable(size_hint)\n        group_index = ensure_int64(group_index)\n        comp_ids, obs_group_ids = table.get_labels_groupby(group_index)\n        if sort and len(obs_group_ids) > 0:\n            obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)\n    return (ensure_int64(comp_ids), ensure_int64(obs_group_ids))",
    "docstring": "Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\sorting.py",
    "ast_data": "FunctionDef name:compress_group_index arg:group_index arg:sort arguments arg arg If BoolOp Call Call Compare Assign Call Compare Compare Assign Call Assign Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Call Assign Call Return return:yes Call Call"
  },
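Conceptually, the sorted fast path above is the same computation as ``np.unique`` with ``return_inverse`` (a sketch of the idea, not the pandas internals)::

    import numpy as np

    group_index = np.array([3, 3, 7, 7, 7, 12], dtype=np.int64)
    obs_group_ids, comp_ids = np.unique(group_index, return_inverse=True)
    assert list(comp_ids) == [0, 0, 1, 1, 1, 2]   # offsets into uniques
    assert list(obs_group_ids) == [3, 7, 12]      # the unique labels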
  {
    "library": "tensorflow",
    "name": "__lt__",
    "source_code": "def __lt__(self, other):\n    return str(self) < str(other)",
    "docstring": "Allows feature columns to be sorted in Python 3 as they are in Python 2. Feature columns need to occasionally be sortable, for example when used as keys in a features dictionary passed to a layer. In CPython, must be defined for all objects in the sequence being sorted. If any objects in the sequence being sorted do not have an method compatible with feature column objects (such as strings), then CPython will fall back to using the method below. Args: other: The other object to compare to. Returns: True if the string representation of this object is lexicographically less than the string representation of . For FeatureColumn objects, this looks like \"\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "numpy",
    "name": "ismethod",
    "source_code": "def ismethod(object):\n    return isinstance(object, types.MethodType)",
    "docstring": "Return true if the object is an instance method. Instance method objects provide these attributes: __doc__ documentation string __name__ name with which this method was defined im_class class object in which this method belongs im_func function object containing implementation of method im_self instance to which this method is bound, or None",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:ismethod arg:object arguments arg Return return:yes Call"
  },
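The distinction it tests, in a few lines::

    import types

    class C:
        def m(self):
            pass

    assert isinstance(C().m, types.MethodType)    # bound instance method
    assert not isinstance(C.m, types.MethodType)  # plain function in Py3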
  {
    "library": "matplotlib",
    "name": "_is_full_circle_rad",
    "source_code": "def _is_full_circle_rad(thetamin, thetamax):\n    return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14",
    "docstring": "Determine if a wedge (in radians) spans the full circle. The condition is derived from :class:.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:_is_full_circle_rad arg:thetamin arg:thetamax arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "pygame",
    "name": "DirtySprite",
    "source_code": "class DirtySprite(Sprite):\n\n    def __init__(self, *groups):\n        self.dirty = 1\n        self.blendmode = 0\n        self._visible = 1\n        self._layer = getattr(self, '_layer', 0)\n        self.source_rect = None\n        Sprite.__init__(self, *groups)\n\n    def _set_visible(self, val):\n        self._visible = val\n        if self.dirty < 2:\n            self.dirty = 1\n\n    def _get_visible(self):\n        return self._visible\n\n    @property\n    def visible(self):\n        return self._get_visible()\n\n    @visible.setter\n    def visible(self, value):\n        self._set_visible(value)\n\n    @property\n    def layer(self):\n        return self._layer\n\n    @layer.setter\n    def layer(self, value):\n        if not self.alive():\n            self._layer = value\n        else:\n            raise AttributeError(\"Can't set layer directly after adding to group. Use group.change_layer(sprite, new_layer) instead.\")\n\n    def __repr__(self):\n        return f'<{self.__class__.__name__} DirtySprite(in {len(self.groups())} groups)>'",
    "docstring": "a more featureful subclass of Sprite with more attributes pygame.sprite.DirtySprite(*groups): return DirtySprite Extra DirtySprite attributes with their default values: dirty = 1 If set to 1, it is repainted and then set to 0 again. If set to 2, it is always dirty (repainted each frame; flag is not reset). If set to 0, it is not dirty and therefore not repainted again. blendmode = 0 It's the special_flags argument of Surface.blit; see the blendmodes in the Surface.blit documentation source_rect = None This is the source rect to use. Remember that it is relative to the top left corner (0, 0) of self.image. visible = 1 Normally this is 1. If set to 0, it will not be repainted. (If you change visible to 1, you must set dirty to 1 for it to be erased from the screen.) _layer = 0 0 is the default value but this is able to be set differently when subclassing.",
    "type": "class",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "ClassDef name:DirtySprite FunctionDef name:__init__ arg:self arguments arg arg Assign Assign Assign Assign Call Assign Call FunctionDef name:_set_visible arg:self arg:val arguments arg arg Assign If Compare Assign FunctionDef name:_get_visible arg:self arguments arg Return return:yes FunctionDef name:visible arg:self arguments arg Return return:yes Call FunctionDef name:visible arg:self arg:value arguments arg arg Call FunctionDef name:layer arg:self arguments arg Return return:yes FunctionDef name:layer arg:self arg:value arguments arg arg If Call Assign Raise Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "Linear",
    "source_code": "class Linear(Module):\n    __constants__ = ['in_features', 'out_features']\n    in_features: int\n    out_features: int\n    weight: Tensor\n\n    def __init__(self, in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__()\n        self.in_features = in_features\n        self.out_features = out_features\n        self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))\n        if bias:\n            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))\n        else:\n            self.register_parameter('bias', None)\n        self.reset_parameters()\n\n    def reset_parameters(self) -> None:\n        init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n        if self.bias is not None:\n            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.linear(input, self.weight, self.bias)\n\n    def extra_repr(self) -> str:\n        return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'",
    "docstring": "Applies an affine linear transformation to the incoming data: :math:. This module supports :ref:. On certain ROCm devices, when using float16 inputs this module will use :ref: for backward. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to `(*, H_\\text{in})*H_\\text{in} = \\text{in\\_features}(*, H_\\text{out})H_\\text{out} = \\text{out\\_features}(\\text{out\\_features}, \\text{in\\_features})\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})k = \\frac{1}{\\text{in\\_features}}(\\text{out\\_features})bias\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})k = \\frac{1}{\\text{in\\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30])",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\linear.py",
    "ast_data": "ClassDef name:Linear Assign FunctionDef name:__init__ arg:self arg:in_features arg:out_features arg:bias arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Call Assign Assign Assign Call Call If Assign Call Call Call Call FunctionDef name:reset_parameters arg:self arguments arg Call Call If Compare Assign Call Assign Compare Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "sphinx",
    "name": "FilterSystemMessages",
    "source_code": "class FilterSystemMessages(SphinxTransform):\n    default_priority = 999\n\n    def apply(self, **kwargs: Any) -> None:\n        filterlevel = 2 if self.config.keep_warnings else 5\n        for node in list(self.document.findall(nodes.system_message)):\n            if node['level'] < filterlevel:\n                logger.debug('%s [filtered system message]', node.astext())\n                node.parent.remove(node)",
    "docstring": "Filter system messages from a doctree.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:FilterSystemMessages Assign FunctionDef name:apply arg:self arguments arg arg Assign For Call Call If Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_rule",
    "source_code": "def add_rule(self, rule: TypePromotionRule) -> None:\n    if not rule.is_valid():\n        raise ValueError(f'Invalid type promotion rule: {rule}')\n    self._rule_table[f'{rule.namespace}.{rule.op_name}'] = rule",
    "docstring": "Add a type promotion rule for a python op in a torch.ops module. Args: rule: Type promotion rule. module: Module containing the op. E.g. torch.ops.aten. Raises: ValueError: If the rule is invalid.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py",
    "ast_data": "FunctionDef name:add_rule arg:self arg:rule arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "local_sizes",
    "source_code": "def local_sizes(self) -> list[torch.Size]:\n    return [chunk.sizes for chunk in self._storage_meta.chunks]",
    "docstring": "Returns a list of :class:`torch.Size' corresponding to the local sizes for the shards on this rank. Returns an empty list if the current rank does not host any shards for this Tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:local_sizes arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "subgridspec",
    "source_code": "def subgridspec(self, nrows, ncols, **kwargs):\n    return GridSpecFromSubplotSpec(nrows, ncols, self, **kwargs)",
    "docstring": "Create a GridSpec within this subplot. The created will have this as a parent. Parameters ---------- nrows : int Number of rows in grid. ncols : int Number of columns in grid. Returns ------- Other Parameters ---------------- **kwargs All other parameters are passed to . See Also -------- matplotlib.pyplot.subplots Examples -------- Adding three subplots in the space occupied by a single subplot:: fig = plt.figure() gs0 = fig.add_gridspec(3, 1) ax1 = fig.add_subplot(gs0[0]) ax2 = fig.add_subplot(gs0[1]) gssub = gs0[2].subgridspec(1, 3) for i in range(3): fig.add_subplot(gssub[0, i])",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:subgridspec arg:self arg:nrows arg:ncols arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "remove_rubberband",
    "source_code": "def remove_rubberband(self):\n    pass",
    "docstring": "Remove the rubberband.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:remove_rubberband arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "around",
    "source_code": "@array_function_dispatch(_round_dispatcher)\ndef around(a, decimals=0, out=None):\n    return _wrapfunc(a, 'round', decimals=decimals, out=out)",
    "docstring": "Round an array to the given number of decimals. is an alias of . See Also -------- ndarray.round : equivalent method round : alias for this function ceil, fix, floor, rint, trunc",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\fromnumeric.py",
    "ast_data": "FunctionDef name:around arg:a arg:decimals arg:out arguments arg arg arg Return return:yes Call Call"
  },
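Usage sketch, including a negative ``decimals`` to round left of the decimal point::

    import numpy as np

    print(np.around([1.2345, 6.789], decimals=2))  # [1.23 6.79]
    print(np.around([1234.5], decimals=-2))        # [1200.]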
  {
    "library": "sphinx",
    "name": "autosummary_toc_visit_html",
    "source_code": "def autosummary_toc_visit_html(self: nodes.NodeVisitor, node: autosummary_toc) -> None:\n    raise nodes.SkipNode",
    "docstring": "Hide autosummary toctree list in HTML output.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:autosummary_toc_visit_html arg:self arg:node arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "custom_sharded_op_impl",
    "source_code": "def custom_sharded_op_impl(func):\n    return functools.partial(_decorator_func, op=func, op_table=_CUSTOM_SHARDED_OPS)",
    "docstring": "Provides a way for users to write their own custom sharded operator. This can be used to override existing ShardedTensor operators or write a new one not supported by ShardedTensor. If the operator in question is covered by `` parameter which is the process_group used for the ShardedTensor and can be used by implementations for communications within a sharded implementation. Args: func(Callable): Torch function for which we want to provide a sharded implementation (ex: torch.nn.functional.linear)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:custom_sharded_op_impl arg:func arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "avg_pool3d",
    "source_code": "@tf_export('nn.avg_pool3d')\n@dispatch.add_dispatch_support\ndef avg_pool3d(input, ksize, strides, padding, data_format='NDHWC', name=None):\n    with ops.name_scope(name, 'AvgPool3D', [input]) as name:\n        if data_format is None:\n            data_format = 'NDHWC'\n        channel_index = 1 if data_format.startswith('NC') else 3\n        ksize = _get_sequence(ksize, 3, channel_index, 'ksize')\n        strides = _get_sequence(strides, 3, channel_index, 'strides')\n        return gen_nn_ops.avg_pool3d(input, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=name)",
    "docstring": "Performs the average pooling on the input. Each entry in is the mean of the corresponding size window in . Args: input: A 5-D of shape and type , , , , or . ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: A string, either or . The padding algorithm. See [here]( for more information. data_format: A string. 'NDHWC' and 'NCDHW' are supported. name: Optional name for the operation. Returns: A with the same type as . The average pooled output tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:avg_pool3d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If Compare Assign Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
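Usage sketch with a 5-D NDHWC input::

    import tensorflow as tf

    # [batch, depth, height, width, channels]
    x = tf.random.normal([1, 4, 4, 4, 3])
    y = tf.nn.avg_pool3d(x, ksize=2, strides=2, padding="VALID")
    print(y.shape)  # (1, 2, 2, 2, 3)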
  {
    "library": "pytorch",
    "name": "_lower_model_to_backend",
    "source_code": "def _lower_model_to_backend(self, mod: torch.fx.GraphModule, inputs: Tensors) -> torch.nn.Module:\n    return mod",
    "docstring": "Lower the model to a backend.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "FunctionDef name:_lower_model_to_backend arg:self arg:mod arg:inputs arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mark_step_begin",
    "source_code": "def mark_step_begin() -> None:\n    MarkStepBox.mark_step_counter -= 1",
    "docstring": "Indicates that a new iteration of inference or training is about to begin.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:mark_step_begin arguments"
  },
  {
    "library": "scikit-learn",
    "name": "rbf_kernel",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'Y': ['array-like', 'sparse matrix', None], 'gamma': [Interval(Real, 0, None, closed='left'), None, Hidden(np.ndarray)]}, prefer_skip_nested_validation=True)\ndef rbf_kernel(X, Y=None, gamma=None):\n    xp, _ = get_namespace(X, Y)\n    X, Y = check_pairwise_arrays(X, Y)\n    if gamma is None:\n        gamma = 1.0 / X.shape[1]\n    K = euclidean_distances(X, Y, squared=True)\n    K *= -gamma\n    K = _modify_in_place_if_numpy(xp, xp.exp, K, out=K)\n    return K",
    "docstring": "Compute the rbf (gaussian) kernel between X and Y. .. code-block:: text K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) A feature array. Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None An optional second feature array. If , uses . gamma : float, default=None If None, defaults to 1.0 / n_features. Returns ------- kernel : ndarray of shape (n_samples_X, n_samples_Y) The RBF kernel. Examples -------- >>> from sklearn.metrics.pairwise import rbf_kernel >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> rbf_kernel(X, Y) array([[0.71, 0.51], [0.51, 0.71]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:rbf_kernel arg:X arg:Y arg:gamma arguments arg arg arg Assign Call Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "DblFromGeom",
    "source_code": "class DblFromGeom(GEOSFuncFactory):\n    restype = c_int\n    errcheck = staticmethod(check_dbl)",
    "docstring": "Argument is a Geometry, return type is double that is passed in by reference as the last argument.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\misc.py",
    "ast_data": "ClassDef name:DblFromGeom Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "names",
    "source_code": "@property\ndef names(self):\n    return self._names",
    "docstring": "The list of names for each component of a staging area element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:names arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "resolve_name",
    "source_code": "@_disable_user_warnings\ndef resolve_name(f):\n    if isinstance(f, (torch._ops.OpOverload, torch._ops.OpOverloadPacket)):\n        return str(f)\n    return _get_overridable_functions()[1].get(f)",
    "docstring": "Get a human readable string name for a function passed to __torch_function__ Arguments --------- f : Callable Function to resolve the name of. Returns ------- str Name of the function; if eval'ed it should give back the input function.",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:resolve_name arg:f arguments arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "eigenvalues",
    "source_code": "def eigenvalues(self, m=None):\n    if m is None:\n        m = self.n\n    k = np.arange(self.n + 1 - m, self.n + 1)\n    return np.flip(16.0 * np.power(np.cos(0.5 * k * np.pi / (self.n + 1)), 4))",
    "docstring": "Return the requested number of eigenvalues. Parameters ---------- m : int, optional The positive number of smallest eigenvalues to return. If not provided, then all eigenvalues will be returned. Returns ------- eigenvalues : array The requested smallest or all eigenvalues, in ascending order.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:eigenvalues arg:self arg:m arguments arg arg If Compare Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_get_gross_column_widths",
    "source_code": "def _get_gross_column_widths(self) -> Sequence[int]:\n    body_column_widths = self._get_body_column_widths()\n    return [max(*widths) for widths in zip(self.header_column_widths, body_column_widths)]",
    "docstring": "Get widths of columns containing both headers and actual content.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_get_gross_column_widths arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "_compute_translation_matrix",
    "source_code": "def _compute_translation_matrix(translation: Tensor) -> Tensor:\n    matrix: Tensor = eye_like(3, translation, shared_memory=False)\n    dx, dy = torch.chunk(translation, chunks=2, dim=-1)\n    matrix[..., 0, 2:3] += dx\n    matrix[..., 1, 2:3] += dy\n    return matrix",
    "docstring": "Compute affine matrix for translation.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:_compute_translation_matrix arg:translation arguments arg Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "guarded_order",
    "source_code": "def guarded_order(self, seq):\n    seq = [*map(self.remove_precomputed_replacements, seq)]\n    seq = [(self.size_hint(var), orig_idx, var) for orig_idx, var in enumerate(seq)]\n    seq.sort()\n    order = [-1] * len(seq)\n    last_var = None\n    for new_index, (_, orig_index, var) in enumerate(seq):\n        order[orig_index] = new_index\n        if last_var is not None:\n            self.guard_leq(last_var, var)\n        last_var = var\n    return order",
    "docstring": "Return the order of a sequence as a permutation of range(len(seq)) and guard on that order not changing.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:guarded_order arg:self arg:seq arguments arg arg Assign Call Assign Call Call Call Assign Call Assign For Call Assign If Compare Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "normalize_path",
    "source_code": "def normalize_path(path):\n    if './' not in path:\n        return path\n    atoms = []\n    for atom in path.split('/'):\n        if atom == '.':\n            pass\n        elif atom == '..':\n            if atoms:\n                atoms.pop()\n        elif atom:\n            atoms.append(atom)\n    newpath = '/'.join(atoms)\n    if path.startswith('/'):\n        newpath = '/' + newpath\n    return newpath",
    "docstring": "Resolve given path from relative into absolute form.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_helper.py",
    "ast_data": "FunctionDef name:normalize_path arg:path arguments arg If Compare Return return:yes Assign For Call If Compare If Compare If Call If Call Assign Call If Call Assign Return return:yes"
  },
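Behaviour sketch; the import path is taken from the file listed above::

    from cherrypy._helper import normalize_path

    assert normalize_path("/a/./b/../c") == "/a/c"
    assert normalize_path("a/b/./") == "a/b"
    assert normalize_path("no/dots") == "no/dots"  # fast path: returned as-is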
  {
    "library": "pandas",
    "name": "_pad_bytes",
    "source_code": "def _pad_bytes(name: AnyStr, length: int) -> AnyStr:\n    if isinstance(name, bytes):\n        return name + b'\\x00' * (length - len(name))\n    return name + '\\x00' * (length - len(name))",
    "docstring": "Take a char string and pads it with null bytes until it's length chars.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_pad_bytes arg:name arg:length arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
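A standalone re-implementation sketch of the padding helper (named `pad_bytes` here to avoid implying pandas exports it):

```python
# Pad a str or bytes value with null characters up to the target length,
# mirroring pandas.io.stata._pad_bytes.
def pad_bytes(name, length):
    pad = b"\x00" if isinstance(name, bytes) else "\x00"
    return name + pad * (length - len(name))

print(pad_bytes(b"abc", 5))       # b'abc\x00\x00'
print(len(pad_bytes("abc", 8)))   # 8
```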
  {
    "library": "pytorch",
    "name": "get_logdir",
    "source_code": "def get_logdir(self):\n    return self.log_dir",
    "docstring": "Return the directory where event files will be written.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:get_logdir arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "adapt_method_mode",
    "source_code": "def adapt_method_mode(self, is_async, method, method_is_async=None, debug=False, name=None):\n    if method_is_async is None:\n        method_is_async = iscoroutinefunction(method)\n    if debug and (not name):\n        name = name or 'method %s()' % method.__qualname__\n    if is_async:\n        if not method_is_async:\n            if debug:\n                logger.debug('Synchronous handler adapted for %s.', name)\n            return sync_to_async(method, thread_sensitive=True)\n    elif method_is_async:\n        if debug:\n            logger.debug('Asynchronous handler adapted for %s.', name)\n        return async_to_sync(method)\n    return method",
    "docstring": "Adapt a method to be in the correct \"mode\": - If is_async is False: - Synchronous methods are left alone - Asynchronous methods are wrapped with async_to_sync - If is_async is True: - Synchronous methods are wrapped with sync_to_async() - Asynchronous methods are left alone",
    "type": "method",
    "file_path": "django\\django\\core\\handlers\\base.py",
    "ast_data": "FunctionDef name:adapt_method_mode arg:self arg:is_async arg:method arg:method_is_async arg:debug arg:name arguments arg arg arg arg arg arg If Compare Assign Call If BoolOp Assign BoolOp If If If Call Return return:yes Call If If Call Return return:yes Call Return return:yes"
  },
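Django's `sync_to_async`/`async_to_sync` come from asgiref; assuming asgiref is installed, the two adaptation directions the method chooses between look like this:

```python
import asyncio
from asgiref.sync import async_to_sync, sync_to_async

def sync_handler():
    return "sync result"

async def async_handler():
    return "async result"

# is_async=True, synchronous method -> wrap with sync_to_async
wrapped = sync_to_async(sync_handler, thread_sensitive=True)
print(asyncio.run(wrapped()))          # 'sync result'

# is_async=False, asynchronous method -> wrap with async_to_sync
print(async_to_sync(async_handler)())  # 'async result'
```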
  {
    "library": "pytorch",
    "name": "_register_root_pre_forward_hook",
    "source_code": "@no_type_check\ndef _register_root_pre_forward_hook(state: _FSDPState, module: nn.Module):\n    for forward_handle in state._root_pre_forward_handles:\n        forward_handle.remove()\n    state._root_pre_forward_handles.clear()\n    hook = functools.partial(_root_pre_forward, state)\n    state._root_pre_forward_handles.append(module.register_forward_pre_hook(hook, prepend=True, with_kwargs=True))",
    "docstring": "Registers root pre-forward hook on `` to a module to indicate that that module is the local FSDP root. We may remove this assumption in the future, in which case we will need to register this root pre-forward hook on any candidate module that may be the local FSDP root.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_register_root_pre_forward_hook arg:state arg:module arguments arg arg For Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Saveable",
    "source_code": "class _Saveable(BaseSaverBuilder.SaveableObject):\n\n    def __init__(self, table, name, table_name=None):\n        tensors = table.export()\n        specs = [BaseSaverBuilder.SaveSpec(tensors[0], '', name + '-keys'), BaseSaverBuilder.SaveSpec(tensors[1], '', name + '-values')]\n        self.table_name = table_name or name\n        super(MutableHashTable._Saveable, self).__init__(table, specs, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        del restored_shapes\n        with ops.name_scope('%s_table_restore' % self.table_name):\n            with ops.colocate_with(self.op.resource_handle):\n                return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle, restored_tensors[0], restored_tensors[1])",
    "docstring": "SaveableObject implementation for DenseHashTable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:_Saveable FunctionDef name:__init__ arg:self arg:table arg:name arg:table_name arguments arg arg arg arg Assign Call Assign Call Call Assign BoolOp Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_cpp_backtrace",
    "source_code": "def get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=64) -> str:\n    return _get_cpp_backtrace(frames_to_skip, maximum_number_of_frames)",
    "docstring": "Return a string containing the C++ stack trace of the current thread. Args: frames_to_skip (int): the number of frames to skip from the top of the stack maximum_number_of_frames (int): the maximum number of frames to return",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_backtrace.py",
    "ast_data": "FunctionDef name:get_cpp_backtrace arg:frames_to_skip arg:maximum_number_of_frames arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "cos",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef cos(x):\n    return math_ops.cos(x)",
    "docstring": "Computes cos of x element-wise. Args: x: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:cos arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "replace_pattern_with_filters",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef replace_pattern_with_filters(gm: GraphModule, pattern: Union[Callable, Graph, GraphModule], replacement: Union[Callable, Graph, GraphModule, None]=None, match_filters: Optional[list[Callable[['InternalMatch', Graph, Graph], bool]]]=None, ignore_literals: bool=False, replacement_callback: Optional[Callable[['InternalMatch', Graph, Graph], Graph]]=None) -> list[ReplacedPatterns]:\n    return _replace_pattern(gm, pattern, replacement, match_filters, ignore_literals, replacement_callback)",
    "docstring": "See replace_pattern for documentation. This function is an overload with an additional match_filter argument. Args: ``: A function that takes in a match and returns a Graph to be used as the replacement. This allows you to construct a replacement graph based on the match.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\subgraph_rewriter.py",
    "ast_data": "FunctionDef name:replace_pattern_with_filters arg:gm arg:pattern arg:replacement arg:match_filters arg:ignore_literals arg:replacement_callback arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
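A minimal sketch using the plain `torch.fx.subgraph_rewriter.replace_pattern`, the non-filtered entry point this overload builds on (no filters or replacement callback):

```python
import torch
from torch.fx import symbolic_trace, subgraph_rewriter

class M(torch.nn.Module):
    def forward(self, x, y):
        return torch.add(x, y) + 1  # only the torch.add call matches

def pattern(x, y):
    return torch.add(x, y)

def replacement(x, y):
    return torch.sub(x, y)

gm = symbolic_trace(M())
matches = subgraph_rewriter.replace_pattern(gm, pattern, replacement)
print(len(matches))  # 1 match rewritten
print(gm.code)       # forward now calls torch.sub
```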
  {
    "library": "authlib",
    "name": "validate_jti",
    "source_code": "def validate_jti(self, claims, jti):\n    raise NotImplementedError()",
    "docstring": "Validate if the given `` value is used before. Developers MUST implement this method:: def validate_jti(self, claims, jti): key = \"jti:{}-{}\".format(claims[\"sub\"], jti) if redis.get(key): return False redis.set(key, 1, ex=3600) return True",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\client.py",
    "ast_data": "FunctionDef name:validate_jti arg:self arg:claims arg:jti arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "title",
    "source_code": "@property\ndef title(self) -> str:\n    if self.label is not None:\n        return self.label + (f': {self.sub_label}' if self.sub_label else '')\n    elif '\\n' not in self.stmt:\n        return self.stmt + (f': {self.sub_label}' if self.sub_label else '')\n    return f'stmt:{(f' ({self.sub_label})' if self.sub_label else '')}\\n{textwrap.indent(self.stmt, '  ')}'",
    "docstring": "Best effort attempt at a string label for the measurement.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "FunctionDef name:title arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BNReLU2d",
    "source_code": "class BNReLU2d(nnq.BatchNorm2d):\n    _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d\n\n    def __init__(self, num_features, eps=1e-05, momentum=0.1, device=None, dtype=None):\n        super().__init__(num_features, eps=eps, momentum=momentum, device=device, dtype=dtype)\n\n    def forward(self, input):\n        if len(input.shape) != 4:\n            raise ValueError('Input shape must be `(N, C, H, W)`!')\n        return torch.ops.quantized.batch_norm2d_relu(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.scale, self.zero_point)\n\n    def _get_name(self):\n        return 'QuantizedBNReLU2d'\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)\n\n    @classmethod\n    def from_reference(cls, bn_relu, output_scale, output_zero_point):\n        return super().from_reference(bn_relu[0], output_scale, output_zero_point)",
    "docstring": "A BNReLU2d module is a fused module of BatchNorm2d and ReLU We adopt the same interface as :class:. Attributes: Same as torch.ao.nn.quantized.BatchNorm2d",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\quantized\\modules\\bn_relu.py",
    "ast_data": "ClassDef name:BNReLU2d Assign FunctionDef name:__init__ arg:self arg:num_features arg:eps arg:momentum arg:device arg:dtype arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg If Compare Call Raise Call Return return:yes Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call FunctionDef name:from_reference arg:cls arg:bn_relu arg:output_scale arg:output_zero_point arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_recompute_transform",
    "source_code": "def _recompute_transform(self):\n    assert self._patch_type in ('arc', 'circle')\n    center = (self.convert_xunits(self._center[0]), self.convert_yunits(self._center[1]))\n    width = self.convert_xunits(self._width)\n    height = self.convert_yunits(self._height)\n    self._patch_transform = mtransforms.Affine2D().scale(width * 0.5, height * 0.5).translate(*center)",
    "docstring": "Notes ----- This cannot be called until after this has been added to an Axes, otherwise unit conversion will fail. This makes it very important to call the accessor method and not directly access the transformation member variable.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:_recompute_transform arg:self arguments arg Compare Assign Call Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "local_resource",
    "source_code": "def local_resource(self):\n    current_device = device_util.canonicalize(device_util.current())\n    host_device = device_util.canonicalize(device_util.get_host_for_device(current_device))\n    return self._host_to_resources.get(host_device, self._host_to_resources[next(iter(self._host_to_resources))])",
    "docstring": "Returns the resource on the local worker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:local_resource arg:self arguments arg Assign Call Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bymonthday=None, interval=1, tz=None):\n    if interval != int(interval) or interval < 1:\n        raise ValueError('interval must be an integer greater than 0')\n    if bymonthday is None:\n        bymonthday = range(1, 32)\n    rule = rrulewrapper(DAILY, bymonthday=bymonthday, interval=interval, **self.hms0d)\n    super().__init__(rule, tz=tz)",
    "docstring": "Parameters ---------- bymonthday : int or list of int, default: all days Ticks will be placed on every day in *bymonthday*. Default is `~datetime.tzinfotimezonedateutil.tz`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bymonthday arg:interval arg:tz arguments arg arg arg arg If BoolOp Compare Call Compare Raise Call If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_raise_if_missing",
    "source_code": "def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:\n    if len(key) == 0:\n        return\n    missing_mask = indexer < 0\n    nmissing = missing_mask.sum()\n    if nmissing:\n        if nmissing == len(indexer):\n            raise KeyError(f'None of [{key}] are in the [{axis_name}]')\n        not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())\n        raise KeyError(f'{not_found} not in index')",
    "docstring": "Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_raise_if_missing arg:self arg:key arg:indexer arg:axis_name arguments arg arg arg arg If Compare Call Return return:no Assign Compare Assign Call If If Compare Call Raise Call Assign Call Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_astuple",
    "source_code": "def _astuple(attrs):\n    cls = type(attrs)\n    fields = getattr(cls, '__attrs_attrs__', None)\n    if fields is None:\n        raise ValueError('%r is not an attrs-decorated class.' % cls)\n    values = []\n    for field in fields:\n        values.append(getattr(attrs, field.name))\n    return tuple(values)",
    "docstring": "Converts the given attrs to tuple non-recursively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:_astuple arg:attrs arguments arg Assign Call Assign Call If Compare Raise Call Assign For Call Call Return return:yes Call"
  },
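A sketch of what `_astuple` does, assuming the `attrs` package is installed: it walks the class's `__attrs_attrs__` and collects each field's value, without recursing into nested attrs classes:

```python
import attr

@attr.s
class Point:
    x = attr.ib()
    y = attr.ib()

p = Point(1, 2)
# Same field walk as _astuple, non-recursive.
fields = Point.__attrs_attrs__
print(tuple(getattr(p, f.name) for f in fields))  # (1, 2)
```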
  {
    "library": "scrapy",
    "name": "_validate_values",
    "source_code": "def _validate_values(compdict: Mapping[Any, Any]) -> None:\n    for name, value in compdict.items():\n        if value is not None and (not isinstance(value, numbers.Real)):\n            raise ValueError(f'Invalid value {value} for component {name}, please provide a real number or None instead')",
    "docstring": "Fail if a value in the components dict is not a real number or None.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\conf.py",
    "ast_data": "FunctionDef name:_validate_values arg:compdict arguments arg For Call If BoolOp Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "run_shell_cmd",
    "source_code": "def run_shell_cmd(args):\n    proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    return proc.communicate()",
    "docstring": "Executes shell commands and returns output. Args: args: String of shell commands to run. Returns: Tuple output (stdoutdata, stderrdata) from running the shell commands.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py",
    "ast_data": "FunctionDef name:run_shell_cmd arg:args arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_check_p2p_op_list",
    "source_code": "def _check_p2p_op_list(p2p_op_list) -> None:\n    if not isinstance(p2p_op_list, list) or not all((isinstance(p2p_op, P2POp) for p2p_op in p2p_op_list)):\n        raise ValueError('Invalid ``p2p_op_list``. Each op is expected to to be of type ``torch.distributed.P2POp``.')\n    group = p2p_op_list[0].group\n    if not all((group == p2p_op.group for p2p_op in p2p_op_list)):\n        raise ValueError('All ops need to use the same group.')",
    "docstring": "Check that the `` is a list of P2POp instances. Also, check that all ops use the same group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_check_p2p_op_list arg:p2p_op_list arguments arg If BoolOp Call Call Call Raise Call Assign If Call Compare Raise Call"
  },
  {
    "library": "scipy",
    "name": "todense",
    "source_code": "def todense(self):\n    I_arr = np.eye(*self.shape, dtype=self.dtype)\n    return self._matmat(I_arr)",
    "docstring": "Return a dense array representation of this operator. Returns ------- arr : ndarray, shape=(n, n) An array with the same shape and containing the same data represented by this .",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_lbfgsb_py.py",
    "ast_data": "FunctionDef name:todense arg:self arguments arg Assign Call Return return:yes Call"
  },
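The same identity-`matmat` trick works on any SciPy `LinearOperator`: multiplying by the identity materializes the operator column by column. A sketch using `aslinearoperator` on a known matrix:

```python
import numpy as np
from scipy.sparse.linalg import aslinearoperator

A = np.array([[1.0, 2.0], [3.0, 4.0]])
op = aslinearoperator(A)

# Multiply by the identity to densify the operator.
I = np.eye(*op.shape, dtype=op.dtype)
dense = op.matmat(I)
print(np.allclose(dense, A))  # True
```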
  {
    "library": "scipy",
    "name": "Bohachevsky1",
    "source_code": "class Bohachevsky1(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0]) - 0.4 * cos(4 * pi * x[1]) + 0.7",
    "docstring": "Bohachevsky 1 objective function. The Bohachevsky 1 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: equation needs to be fixed up in the docstring. see Jamil#17",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Bohachevsky1 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
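A direct evaluation of the 2-D objective at the global optimum `x = (0, 0)`, matching `fglob = 0.0` up to floating-point rounding:

```python
from math import cos, pi

def bohachevsky1(x):
    return (x[0] ** 2 + 2 * x[1] ** 2
            - 0.3 * cos(3 * pi * x[0]) - 0.4 * cos(4 * pi * x[1]) + 0.7)

print(bohachevsky1([0.0, 0.0]))  # ~0.0 (up to floating-point rounding)
```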
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    assert not model_kwargs\n    return (tuple((arg for arg in model_args if arg is not None)), {})",
    "docstring": "Remove from arguments. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs. Raises: ValueError: If is not empty.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Return return:yes Call Compare"
  },
  {
    "library": "pytorch",
    "name": "get_lifted_tensor_constant",
    "source_code": "def get_lifted_tensor_constant(program: 'ExportedProgram', node: torch.fx.Node) -> Optional[torch.Tensor]:\n    if is_lifted_tensor_constant(program, node):\n        lifted_tensor_name = program.graph_signature.inputs_to_lifted_tensor_constants[node.name]\n        return program.constants[lifted_tensor_name]\n    return None",
    "docstring": "Returns the lifted tensor constant associated with the given node in the exported program. Returns None if the node is not a lifted tensor constant within the exported program",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:get_lifted_tensor_constant arg:program arg:node arguments arg arg If Call Assign Return return:yes Return return:no"
  },
  {
    "library": "sphinx",
    "name": "build_mimetype",
    "source_code": "def build_mimetype(self) -> None:\n    logger.info(__('writing mimetype file...'))\n    copyfile(self.template_dir / 'mimetype', self.outdir / 'mimetype', force=True)",
    "docstring": "Write the metainfo file mimetype.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:build_mimetype arg:self arguments arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_where",
    "source_code": "def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:\n    if mask.layout == torch.strided:\n        return torch.where(mask, input, fill_value)\n    elif mask.layout == torch.sparse_coo:\n        return _sparse_coo_where(mask, input, fill_value)\n    elif mask.layout == torch.sparse_csr:\n        return _sparse_csr_where(mask, input, fill_value)\n    else:\n        raise ValueError(f'_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}')",
    "docstring": "torch.where with sparse inputs support. _where implements the following invariant: _where(mask, input, fill_value).to_dense(fill_value) == torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value)) where means , mask is boolean sparse tensor, and is like except that the unspecified elements are mapped to rather than to . Returns a sparse tensor with the following features: - all specified elements correspond to masked-in elements that have the values of the input tensor. If there exists a masked-in element (as specified by mask) that is not specified in the input, in the result tensor, the corresponding element has value 0. In the dense part of the sparse tensor, the masked-out elements are replaced with fill_value. - all unspecified elements correspond to masked-out elements.",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:_where arg:mask arg:input arg:fill_value arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
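For strided (dense) inputs the function is exactly `torch.where`; the sparse branches only change the layout handling. A quick dense-case check:

```python
import torch

mask = torch.tensor([True, False, True])
inp = torch.tensor([1.0, 2.0, 3.0])
fill = torch.tensor(-1.0)
print(torch.where(mask, inp, fill))  # tensor([ 1., -1.,  3.])
```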
  {
    "library": "scrapy",
    "name": "get_base_url",
    "source_code": "def get_base_url(response: TextResponse) -> str:\n    if response not in _baseurl_cache:\n        text = response.text[0:4096]\n        _baseurl_cache[response] = html.get_base_url(text, response.url, response.encoding)\n    return _baseurl_cache[response]",
    "docstring": "Return the base url of the given response, joined with the response url",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\response.py",
    "ast_data": "FunctionDef name:get_base_url arg:response arguments arg If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, sample_weight=None):\n    if not hasattr(self, 'coef_'):\n        self._more_validate_params(for_partial_fit=True)\n    return self._partial_fit(X, y, self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, sample_weight=sample_weight, coef_init=None, intercept_init=None)",
    "docstring": "Perform one epoch of stochastic gradient descent on given samples. Internally, this method uses ``. Therefore, it is not guaranteed that a minimum of the cost function is reached after calling it once. Matters such as objective convergence and early stopping should be handled by the user. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Subset of training data. y : numpy array of shape (n_samples,) Subset of target values. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg If Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "reset_margins",
    "source_code": "def reset_margins(self):\n    for todo in ['left', 'right', 'bottom', 'top', 'leftcb', 'rightcb', 'bottomcb', 'topcb']:\n        self.edit_margins(todo, 0.0)",
    "docstring": "Reset all the margins to zero. Must do this after changing figure size, for instance, because the relative size of the axes labels etc changes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:reset_margins arg:self arguments arg For Call"
  },
  {
    "library": "kornia",
    "name": "upscale_double",
    "source_code": "def upscale_double(x: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(x)\n    KORNIA_CHECK_SHAPE(x, ['*', 'H', 'W'])\n    double_shape = x.shape[:-2] + (x.shape[-2] * 2, x.shape[-1] * 2)\n    upscaled = zeros(double_shape, device=x.device, dtype=x.dtype)\n    upscaled[..., ::2, ::2] = x\n    upscaled[..., ::2, 1::2][..., :-1] = (upscaled[..., ::2, ::2][..., :-1] + upscaled[..., ::2, 2::2]) / 2\n    upscaled[..., ::2, -1] = upscaled[..., ::2, -2]\n    upscaled[..., 1::2, :][..., :-1, :] = (upscaled[..., ::2, :][..., :-1, :] + upscaled[..., 2::2, :]) / 2\n    upscaled[..., -1, :] = upscaled[..., -2, :]\n    return upscaled",
    "docstring": "Upscale image by the factor of 2, even indices maps to original indices. Odd indices are linearly interpolated from the even ones. Args: x: input image. Shape: - Input: :math: - Output :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "FunctionDef name:upscale_double arg:x arguments arg Call Call Assign Assign Call Assign Assign Assign Assign Assign Return return:yes"
  },
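A tiny numeric walk-through of the same even/odd layout on a 2x2 input, written directly in torch rather than through kornia:

```python
import torch

# Even output indices copy the input; odd ones are linear interpolations,
# with the last row/column replicated.
x = torch.tensor([[0.0, 2.0],
                  [4.0, 6.0]])
up = torch.zeros(4, 4)
up[::2, ::2] = x                                     # even/even: originals
up[::2, 1::2][:, :-1] = (x[:, :-1] + x[:, 1:]) / 2   # odd columns: interp
up[::2, -1] = up[::2, -2]                            # last column: replicate
up[1::2, :][:-1, :] = (up[::2, :][:-1, :] + up[2::2, :]) / 2
up[-1, :] = up[-2, :]                                # last row: replicate
print(up)
# tensor([[0., 1., 2., 2.],
#         [2., 3., 4., 4.],
#         [4., 5., 6., 6.],
#         [4., 5., 6., 6.]])
```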
  {
    "library": "pandas",
    "name": "get_bool_data",
    "source_code": "def get_bool_data(self) -> Self:\n    new_blocks = []\n    for blk in self.blocks:\n        if blk.dtype == bool:\n            new_blocks.append(blk)\n        elif blk.is_object:\n            new_blocks.extend((nb for nb in blk._split() if nb.is_bool))\n    return self._combine(new_blocks)",
    "docstring": "Select blocks that are bool-dtype and columns from object-dtype blocks that are all-bool.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:get_bool_data arg:self arguments arg Assign For If Compare Call If Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "_start_new_batch",
    "source_code": "def _start_new_batch(self, batch_id: int, uri: str, feed_options: dict[str, Any], spider: Spider, uri_template: str) -> FeedSlot:\n    storage = self._get_storage(uri, feed_options)\n    return FeedSlot(storage=storage, uri=uri, format=feed_options['format'], store_empty=feed_options['store_empty'], batch_id=batch_id, uri_template=uri_template, filter=self.filters[uri_template], feed_options=feed_options, spider=spider, exporters=self.exporters, settings=self.settings, crawler=self.crawler)",
    "docstring": "Redirect the output data stream to a new file. Execute multiple times if FEED_EXPORT_BATCH_ITEM_COUNT setting or FEEDS.batch_item_count is specified :param batch_id: sequence number of current batch :param uri: uri of the new batch to start :param feed_options: dict with parameters of feed :param spider: user spider :param uri_template: template of uri which contains %(batch_time)s or %(batch_id)d to create new uri",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:_start_new_batch arg:self arg:batch_id arg:uri arg:feed_options arg:spider arg:uri_template arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "_parse_qs",
    "source_code": "def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):\n    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]\n    d = {}\n    for name_value in pairs:\n        if not name_value and (not strict_parsing):\n            continue\n        nv = name_value.split('=', 1)\n        if len(nv) != 2:\n            if strict_parsing:\n                raise ValueError('bad query field: %r' % (name_value,))\n            if keep_blank_values:\n                nv.append('')\n            else:\n                continue\n        if len(nv[1]) or keep_blank_values:\n            name = unquote_plus(nv[0], encoding, errors='strict')\n            value = unquote_plus(nv[1], encoding, errors='strict')\n            if name in d:\n                if not isinstance(d[name], list):\n                    d[name] = [d[name]]\n                d[name].append(value)\n            else:\n                d[name] = value\n    return d",
    "docstring": "Parse a query given as a string argument. Arguments: qs: URL-encoded query string to be parsed keep_blank_values: flag indicating whether blank values in URL encoded queries should be treated as blank strings. A true value indicates that blanks should be retained as blank strings. The default false value indicates that blank values are to be ignored and treated as if they were not included. strict_parsing: flag indicating what to do with parsing errors. If false (the default), errors are silently ignored. If true, errors raise a ValueError exception. Returns a dict, as G-d intended.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:_parse_qs arg:qs arg:keep_blank_values arg:strict_parsing arg:encoding arguments arg arg arg arg Assign Call Call Assign For If BoolOp Assign Call If Compare Call If Raise Call If Call If BoolOp Call Assign Call Assign Call If Compare If Call Assign Call Assign Return return:yes"
  },
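A usage sketch, assuming a cherrypy install (`_parse_qs` is a private helper in `cherrypy.lib.httputil`), showing repeated keys and the `keep_blank_values` flag:

```python
from cherrypy.lib.httputil import _parse_qs

print(_parse_qs('a=1&a=2&b=3'))                  # {'a': ['1', '2'], 'b': '3'}
print(_parse_qs('a=&b=3'))                       # {'b': '3'} (blank dropped)
print(_parse_qs('a=&b=3', keep_blank_values=1))  # {'a': '', 'b': '3'}
```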
  {
    "library": "scrapy",
    "name": "CloseSpider",
    "source_code": "class CloseSpider(Exception):\n\n    def __init__(self, reason: str='cancelled'):\n        super().__init__()\n        self.reason = reason",
    "docstring": "Raise this from callbacks to request the spider to be closed",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:CloseSpider FunctionDef name:__init__ arg:self arg:reason arguments arg arg Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "Accuracy",
    "source_code": "class Accuracy(MeanMetricWrapper):\n\n    def __init__(self, name='accuracy', dtype=None):\n        super(Accuracy, self).__init__(accuracy, name, dtype=dtype)",
    "docstring": "Calculates how often predictions equal labels. This metric creates two local variables, and that are used to compute the frequency with which matches . This frequency is ultimately returned as : an idempotent operation that simply divides by . If is , weights default to 1. Use of 0 to mask values. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.Accuracy() >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]]) >>> m.result().numpy() 0.75 >>> m.reset_state() >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]], ... sample_weight=[1, 1, 0, 0]) >>> m.result().numpy() 0.5 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:Accuracy FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "functional_call",
    "source_code": "@deprecated('`torch.nn.utils.stateless.functional_call` is deprecated as of PyTorch 2.0 and will be removed in a future version of PyTorch. Please use `torch.func.functional_call` instead which is a drop-in replacement.', category=FutureWarning)\ndef functional_call(module: 'torch.nn.Module', parameters_and_buffers: dict[str, Tensor], args: Optional[Union[Any, tuple]]=None, kwargs: Optional[dict[str, Any]]=None, *, tie_weights: bool=True, strict: bool=False):\n    return _functional_call(module, parameters_and_buffers, args, kwargs, tie_weights=tie_weights, strict=strict)",
    "docstring": "Perform a functional call on the module by replacing the module parameters and buffers with the provided ones. .. warning:: This API is deprecated as of PyTorch 2.0 and will be removed in a future version of PyTorch. Please use :func: instead, which is a drop-in replacement for this API. .. note:: If the module has active parametrizations, passing a value in the :attr: argument with the name set to the regular parameter name will completely disable the parametrization. If you want to apply the parametrization function to the value passed please set the key as `parameters_and_buffers`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\stateless.py",
    "ast_data": "FunctionDef name:functional_call arg:module arg:parameters_and_buffers arg:args arg:kwargs arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
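The drop-in replacement the deprecation message points to is `torch.func.functional_call`; a minimal example substituting a weight for one call:

```python
import torch
from torch.func import functional_call

lin = torch.nn.Linear(3, 1, bias=False)
new_weight = torch.ones_like(lin.weight)
x = torch.ones(3)

# Run the module with the substituted weight; lin's own weight is untouched.
out = functional_call(lin, {"weight": new_weight}, (x,))
print(out)  # tensor([3.])
```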
  {
    "library": "authlib",
    "name": "get_request_object",
    "source_code": "def get_request_object(self, request_uri: str):\n    raise NotImplementedError()",
    "docstring": "Download the request object at `` parameter is supported:: class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest): def get_request_object(self, request_uri: str): try: return requests.get(request_uri).text except requests.Exception: return None",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9101\\authorization_server.py",
    "ast_data": "FunctionDef name:get_request_object arg:self arg:request_uri arguments arg arg Raise Call"
  },
  {
    "library": "authlib",
    "name": "request",
    "source_code": "def request(self, method, url, withhold_token=False, auth=None, **kwargs):\n    if self.default_timeout:\n        kwargs.setdefault('timeout', self.default_timeout)\n    if not withhold_token and auth is None:\n        if not self.token:\n            raise MissingTokenError()\n        auth = self.token_auth\n    return super().request(method, url, auth=auth, **kwargs)",
    "docstring": "Send request with auto refresh token feature (if available).",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\requests_client\\oauth2_session.py",
    "ast_data": "FunctionDef name:request arg:self arg:method arg:url arg:withhold_token arg:auth arguments arg arg arg arg arg arg If Call If BoolOp Compare If Raise Call Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, size=None):\n    u = self.random_state.uniform(size=size)\n    if self._mirror_uniform:\n        u = 1 - u\n    r = self._rng.ppf(u)\n    if self._rvs_transform is not None:\n        r = self._rvs_transform(r, *self._frozendist.args)\n    return self.loc + self.scale * r",
    "docstring": "Sample from the distribution by inversion. Parameters ---------- size : int or tuple, optional The shape of samples. Default is `ppfNumericalInversePolynomial` method of the rv_continuous class is overwritten. Hence, a different stream of random numbers is generated even if the same seed is used.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:size arguments arg arg Assign Call If Assign Assign Call If Compare Assign Call Return return:yes"
  },
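A usage sketch of the public API this method belongs to, assuming SciPy >= 1.8 (`scipy.stats.sampling.NumericalInversePolynomial`), following the pattern from the SciPy docs:

```python
import numpy as np
from scipy.stats.sampling import NumericalInversePolynomial

# A distribution object exposing a pdf; the generator builds a fast
# numerical-inversion ppf from it (normalization constant optional).
class StandardNormal:
    def pdf(self, x):
        return np.exp(-0.5 * x * x)

rng = NumericalInversePolynomial(StandardNormal(), random_state=42)
samples = rng.rvs(size=5)
print(samples.shape)  # (5,)
```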
  {
    "library": "pytorch",
    "name": "kernel",
    "source_code": "@property\ndef kernel(self):\n    return _kernel._get_handler()",
    "docstring": "The kernel currently being generated",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\virtualized.py",
    "ast_data": "FunctionDef name:kernel arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "JvpIncrementNestingCtxManagerVariable",
    "source_code": "class JvpIncrementNestingCtxManagerVariable(ContextWrappingVariable):\n    _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.FUNCTORCH_STACK_MATCH)\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', **kwargs):\n        var = JvpIncrementNestingCtxManagerVariable(target_values=None, initial_values=None, **kwargs)\n        return var\n\n    def enter(self, tx):\n        install_guard(self._guards_singleton)\n        jvp_level = torch._functorch.eager_transforms.enter_jvp_nesting()\n        self.set_cleanup_hook(tx, lambda: torch._functorch.eager_transforms.exit_jvp_nesting())\n        self.proxy = tx.output.create_node('call_function', torch._C._functorch._jvp_increment_nesting, (), {})\n        return variables.ConstantVariable.create(jvp_level)\n\n    def exit(self, tx: 'InstructionTranslator', *args):\n        self.cleanup()\n        tx.output.create_node('call_function', torch._C._functorch._jvp_decrement_nesting, (), {})\n        return variables.ConstantVariable.create(None)",
    "docstring": "represents torch.func.jvp increment/decrement nesting",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:JvpIncrementNestingCtxManagerVariable Assign Call Call FunctionDef name:create arg:tx arguments arg arg Assign Call Return return:yes FunctionDef name:enter arg:self arg:tx arguments arg arg Call Assign Call Call arguments Call Assign Call Return return:yes Call FunctionDef name:exit arg:self arg:tx arguments arg arg arg Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "construct_relative_path",
    "source_code": "def construct_relative_path(current_template_name, relative_name, allow_recursion=False):\n    new_name = relative_name.strip('\\'\"')\n    if not new_name.startswith(('./', '../')):\n        return relative_name\n    if current_template_name is None:\n        raise TemplateSyntaxError(f'The relative path {relative_name} cannot be evaluated due to an unknown template origin.')\n    new_name = posixpath.normpath(posixpath.join(posixpath.dirname(current_template_name.lstrip('/')), new_name))\n    if new_name.startswith('../'):\n        raise TemplateSyntaxError(\"The relative path '%s' points outside the file hierarchy that template '%s' is in.\" % (relative_name, current_template_name))\n    if not allow_recursion and current_template_name.lstrip('/') == new_name:\n        raise TemplateSyntaxError(\"The relative path '%s' was translated to template name '%s', the same template in which the tag appears.\" % (relative_name, current_template_name))\n    has_quotes = relative_name.startswith(('\"', \"'\")) and relative_name[0] == relative_name[-1]\n    return f'\"{new_name}\"' if has_quotes else new_name",
    "docstring": "Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name.",
    "type": "function",
    "file_path": "django\\django\\template\\loader_tags.py",
    "ast_data": "FunctionDef name:construct_relative_path arg:current_template_name arg:relative_name arg:allow_recursion arguments arg arg arg Assign Call If Call Return return:yes If Compare Raise Call Assign Call Call Call Call If Call Raise Call If BoolOp Compare Call Raise Call Assign BoolOp Call Compare Return return:yes"
  },
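A usage sketch, assuming a Django install (the function is importable from `django.template.loader_tags`); note how surrounding quotes are preserved and non-relative names pass through:

```python
from django.template.loader_tags import construct_relative_path

print(construct_relative_path('app/pages/index.html', "'./sibling.html'"))
# '"app/pages/sibling.html"' -- quotes preserved, path resolved

print(construct_relative_path('app/pages/index.html', 'plain.html'))
# 'plain.html' -- non-relative names pass through untouched
```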
  {
    "library": "numpy",
    "name": "_data",
    "source_code": "@property\ndef _data(self):\n    return np.ndarray.view(self, np.recarray)",
    "docstring": "Returns the data as a recarray.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:_data arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "extract_operation",
    "source_code": "def extract_operation(obj):\n    operation, args, keywords = (obj, [], {})\n    while hasattr(operation, 'func'):\n        args.extend(getattr(operation, 'args', []))\n        keywords.update(getattr(operation, 'keywords', {}))\n        operation = operation.func\n    return (operation, args, keywords)",
    "docstring": "Take a callable found in Apps._pending_operations and identify the original callable passed to Apps.lazy_model_operation(). If that callable was a partial, return the inner, non-partial function and any arguments and keyword arguments that were supplied with it. obj is a callback defined locally in Apps.lazy_model_operation() and annotated there with a attribute so as to imitate a partial.",
    "type": "function",
    "file_path": "django\\django\\core\\checks\\model_checks.py",
    "ast_data": "FunctionDef name:extract_operation arg:obj arguments arg Assign While Call Call Call Call Call Assign Return return:yes"
  },
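A pure-Python sketch of the same unwrapping loop applied to nested `functools.partial` objects (renamed `unwrap_partial` here; `extract_operation` itself is Django-internal):

```python
import functools

# Peel nested partials by walking the `func` attribute, accumulating
# positional args and keywords along the way.
def unwrap_partial(obj):
    args, keywords = [], {}
    while hasattr(obj, 'func'):
        args.extend(getattr(obj, 'args', []))
        keywords.update(getattr(obj, 'keywords', {}))
        obj = obj.func
    return obj, args, keywords

def f(a, b, c=0):
    return a + b + c

p = functools.partial(functools.partial(f, 1), 2, c=3)
fn, args, kwargs = unwrap_partial(p)
print(fn is f, args, kwargs)  # True [2, 1] {'c': 3}
```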
  {
    "library": "pytorch",
    "name": "get_qconfig_dtypes",
    "source_code": "def get_qconfig_dtypes(qconfig):\n    assert qconfig is not None\n    activation = qconfig.activation()\n    weight = qconfig.weight()\n    act_is_dynamic = getattr(activation, 'is_dynamic', False)\n    return (activation.dtype, weight.dtype, act_is_dynamic)",
    "docstring": "returns the qconfig tuple for qconfig: (activation_dtype, weight_dtype, activation_is_dynamic)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:get_qconfig_dtypes arg:qconfig arguments arg Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "extrinsics",
    "source_code": "@property\ndef extrinsics(self) -> Tensor:\n    if not self._check_valid_params(self._extrinsics, 'extrinsics'):\n        raise AssertionError\n    return self._extrinsics",
    "docstring": "The full 4x4 extrinsics matrix. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:extrinsics arg:self arguments arg If Call Raise Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The specified by this type for the SparseTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ProcessExitedException",
    "source_code": "class ProcessExitedException(ProcessException):\n    __slots__ = ['exit_code']\n\n    def __init__(self, msg: str, error_index: int, error_pid: int, exit_code: int, signal_name: Optional[str]=None):\n        super().__init__(msg, error_index, error_pid)\n        self.exit_code = exit_code\n        self.signal_name = signal_name\n\n    def __reduce__(self):\n        return (type(self), (self.msg, self.error_index, self.pid, self.exit_code, self.signal_name))",
    "docstring": "Exception raised when a process failed due to signal or exited with a specific code.",
    "type": "class",
    "file_path": "pytorch\\torch\\multiprocessing\\spawn.py",
    "ast_data": "ClassDef name:ProcessExitedException Assign FunctionDef name:__init__ arg:self arg:msg arg:error_index arg:error_pid arg:exit_code arg:signal_name arguments arg arg arg arg arg arg Call Call Assign Assign FunctionDef name:__reduce__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "combine",
    "source_code": "def combine(self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable | None=None) -> Series:\n    if fill_value is None:\n        fill_value = na_value_for_dtype(self.dtype, compat=False)\n    if isinstance(other, Series):\n        new_index = self.index.union(other.index)\n        new_name = ops.get_op_result_name(self, other)\n        new_values = np.empty(len(new_index), dtype=object)\n        with np.errstate(all='ignore'):\n            for i, idx in enumerate(new_index):\n                lv = self.get(idx, fill_value)\n                rv = other.get(idx, fill_value)\n                new_values[i] = func(lv, rv)\n    else:\n        new_index = self.index\n        new_values = np.empty(len(new_index), dtype=object)\n        with np.errstate(all='ignore'):\n            new_values[:] = [func(lv, other) for lv in self._values]\n        new_name = self.name\n    npvalues = lib.maybe_convert_objects(new_values, try_float=False)\n    same_dtype = isinstance(self.dtype, (StringDtype, CategoricalDtype))\n    res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=same_dtype)\n    return self._constructor(res_values, index=new_index, name=new_name, copy=False)",
    "docstring": "Combine the Series with a Series or scalar according to . Combine the Series and using to perform elementwise selection for combined Series. is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the . func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``, so the maximum value returned will be the value from some dataset. >>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:combine arg:self arg:other arg:func arg:fill_value arguments arg arg arg arg If Compare Assign Call If Call Assign Call Assign Call Assign Call Call With Call For Call Assign Call Assign Call Assign Call Assign Assign Call Call With Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes Call"
  },
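The docstring's example, runnable as-is: element-wise `max` across two bird-speed Series, with `fill_value=0` supplying the entry missing from `s1`:

```python
import pandas as pd

s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
print(s1.combine(s2, max, fill_value=0))
# duck       30.0
# eagle     200.0
# falcon    345.0
# dtype: float64
```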
  {
    "library": "matplotlib",
    "name": "invert_zaxis",
    "source_code": "def invert_zaxis(self):\n    bottom, top = self.get_zlim()\n    self.set_zlim(top, bottom, auto=None)",
    "docstring": "[*Discouraged*] Invert the z-axis. .. admonition:: Discouraged The use of this method is discouraged. Use instead. See Also -------- get_zinverted get_zlim, set_zlim get_zbound, set_zbound",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:invert_zaxis arg:self arguments arg Assign Call Call"
  },
  {
    "library": "cherrypy",
    "name": "file_generator",
    "source_code": "class file_generator(object):\n\n    def __init__(self, input, chunkSize=65536):\n        self.input = input\n        self.chunkSize = chunkSize\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        chunk = self.input.read(self.chunkSize)\n        if chunk:\n            return chunk\n        else:\n            if hasattr(self.input, 'close'):\n                self.input.close()\n            raise StopIteration()\n    next = __next__\n\n    def __del__(self):\n        if hasattr(self.input, 'close'):\n            self.input.close()",
    "docstring": "Yield the given input (a file object) in chunks (default 64k). (Core)",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\__init__.py",
    "ast_data": "ClassDef name:file_generator FunctionDef name:__init__ arg:self arg:input arg:chunkSize arguments arg arg arg Assign Assign FunctionDef name:__iter__ arg:self arguments arg Return return:yes FunctionDef name:__next__ arg:self arguments arg Assign Call If Return return:yes If Call Call Raise Call Assign FunctionDef name:__del__ arg:self arguments arg If Call Call"
  },
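A usage sketch with an in-memory file, assuming a cherrypy install (`file_generator` is exported from `cherrypy.lib`):

```python
import io
from cherrypy.lib import file_generator

# Stream a file-like object in 4-byte chunks; the source is closed once
# it is exhausted.
src = io.BytesIO(b'abcdefgh')
chunks = list(file_generator(src, chunkSize=4))
print(chunks)  # [b'abcd', b'efgh']
```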
  {
    "library": "django",
    "name": "has_changed",
    "source_code": "def has_changed(self):\n    return any((form.has_changed() for form in self))",
    "docstring": "Return True if data in any form differs from initial.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:has_changed arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "build_reshard_metadata",
    "source_code": "def build_reshard_metadata(st_size: torch.Size, sharding_spec: shard_spec.ShardingSpec, world_size: int) -> tuple[list[ShardMetadata], list[int]]:\n    shard_dim = int(sharding_spec.dim)\n    shards_metadata = [None] * world_size\n    ranks = []\n    offsets = [0] * len(st_size)\n    split_size = get_split_size(st_size[shard_dim], world_size)\n    for idx, placement in enumerate(sharding_spec.placements):\n        ranks.append(placement.rank())\n        sharded_dim_size = get_chunked_dim_size(st_size[shard_dim], split_size, idx)\n        local_tensor_size = list(st_size)\n        local_tensor_size[shard_dim] = sharded_dim_size\n        shards_metadata[placement.rank()] = ShardMetadata(shard_offsets=copy.deepcopy(offsets), shard_sizes=local_tensor_size, placement=placement)\n        offsets[shard_dim] += sharded_dim_size\n    return (shards_metadata, ranks)",
    "docstring": "Based the given sharding spec, we calculate the offset and local shard size. We then build a ShardMetadata on top of the calculation result. Args: st_size (torch.Size): The size of the sharded tensor. sharding_spec (:class:): The specification describing how the tensor is sharded. world_size (int): number of ranks. Returns: A Tuple of the followings: A List[] which contains the metadata for the shard, including offsets, lengths and device placement. A List[int] which contains the ranks in the order of placement.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\reshard.py",
    "ast_data": "FunctionDef name:build_reshard_metadata arg:st_size arg:sharding_spec arg:world_size arguments arg arg arg Assign Call Assign Assign Assign Call Assign Call For Call Call Call Assign Call Assign Call Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_allreduce_fut",
    "source_code": "def _allreduce_fut(process_group: dist.ProcessGroup, tensor: torch.Tensor) -> torch.futures.Future[torch.Tensor]:\n    group_to_use = process_group if process_group is not None else dist.group.WORLD\n    tensor.div_(group_to_use.size())\n    return dist.all_reduce(tensor, group=group_to_use, async_op=True).get_future().then(lambda fut: fut.value()[0])",
    "docstring": "Average the input gradient tensor by allreduce and returns a future.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:_allreduce_fut arg:process_group arg:tensor arguments arg arg Assign Compare Call Call Return return:yes Call Call Call arguments arg Call"
  },
  {
    "library": "django",
    "name": "__init__",
    "source_code": "def __init__(self, layer_ptr, ds):\n    if not layer_ptr:\n        raise GDALException('Cannot create Layer, invalid pointer given')\n    self.ptr = layer_ptr\n    self._ds = ds\n    self._ldefn = capi.get_layer_defn(self._ptr)\n    self._random_read = self.test_capability(b'RandomRead')",
    "docstring": "Initialize on an OGR C pointer to the Layer and the object that owns this layer. The object is required so that a reference to it is kept with this Layer. This prevents garbage collection of the while this Layer is still active.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:layer_ptr arg:ds arguments arg arg arg If Raise Call Assign Assign Assign Call Assign Call"
  },
  {
    "library": "virtualenv",
    "name": "write",
    "source_code": "def write(self, content):\n    pass",
    "docstring": "Nothing to write.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py",
    "ast_data": "FunctionDef name:write arg:self arg:content arguments arg arg"
  },
  {
    "library": "django",
    "name": "_create_point",
    "source_code": "@classmethod\ndef _create_point(cls, ndim, coords):\n    if not ndim:\n        return capi.create_point(None)\n    if ndim < 2 or ndim > 3:\n        raise TypeError('Invalid point dimension: %s' % ndim)\n    cs = capi.create_cs(c_uint(1), c_uint(ndim))\n    i = iter(coords)\n    capi.cs_setx(cs, 0, next(i))\n    capi.cs_sety(cs, 0, next(i))\n    if ndim == 3:\n        capi.cs_setz(cs, 0, next(i))\n    return capi.create_point(cs)",
    "docstring": "Create a coordinate sequence, set X, Y, [Z], and create point",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:_create_point arg:cls arg:ndim arg:coords arguments arg arg arg If Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call Call Call Assign Call Call Call Call Call If Compare Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_validate_inputs",
    "source_code": "@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.VALIDATE_INPUTS)\ndef _validate_inputs(self, graph_def, input_tensors):\n    self._save_conversion_params_metric(graph_def)\n    self._quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n    self._validate_inference_input_output_types(self._quant_mode)\n    if not self._is_unknown_shapes_allowed():\n        for tensor in input_tensors:\n            shape_list = tensor.shape.as_list()\n            if None in shape_list[1:]:\n                raise ValueError(\"None is only supported in the 1st dimension. Tensor '{0}' has invalid shape '{1}'.\".format(_get_tensor_name(tensor), shape_list))\n            elif shape_list and shape_list[0] is None:\n                shape = tensor.shape.as_list()\n                shape[0] = 1\n                tensor.set_shape(shape)\n    if self._trackable_obj is None or not hasattr(self._trackable_obj, 'graph_debug_info'):\n        self._debug_info = _get_debug_info(_build_debug_info_func(self._funcs[0].graph), graph_def)\n    else:\n        self._debug_info = _get_debug_info(_convert_debug_info_func(self._trackable_obj.graph_debug_info), graph_def)",
    "docstring": "Validate the input parameters. Args: graph_def: The TensorFlow GraphDef. input_tensors: List of input tensors. Raise: ValueError: Input shape is not specified. Invalid quantization parameters.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_validate_inputs arg:self arg:graph_def arg:input_tensors arguments arg arg arg Call Assign Call Call If Call For Assign Call If Compare Raise Call Call Call If BoolOp Compare Assign Call Assign Call If BoolOp Compare Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sleep",
    "source_code": "def sleep(sleep_microseconds):\n\n    def _apply_fn(dataset):\n        return _SleepDataset(dataset, sleep_microseconds)\n    return _apply_fn",
    "docstring": "Sleeps for before producing each input element. Args: sleep_microseconds: The number of microseconds to sleep before producing an input element. Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\sleep.py",
    "ast_data": "FunctionDef name:sleep arg:sleep_microseconds arguments arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes"
  },
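A minimal usage sketch for the transformation above, assuming it is exported as `tf.data.experimental.sleep` (consistent with its `tf.data.experimental` module path):

```python
import tensorflow as tf

# Delay each element by 1,000 microseconds (1 ms) before it is produced.
ds = tf.data.Dataset.range(3).apply(tf.data.experimental.sleep(1000))
for x in ds:
    print(x.numpy())  # 0, 1, 2 -- each arriving roughly 1 ms apart
```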
  {
    "library": "tensorflow",
    "name": "string_bytes_split",
    "source_code": "@tf_export('strings.bytes_split')\n@dispatch.add_dispatch_support\ndef string_bytes_split(input, name=None):\n    with ops.name_scope(name, 'StringsByteSplit', [input]):\n        input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name='input')\n        if isinstance(input, ragged_tensor.RaggedTensor):\n            return input.with_flat_values(string_bytes_split(input.flat_values))\n        rank = input.shape.ndims\n        if rank is None:\n            raise ValueError('input must have a statically-known rank.')\n        if rank == 0:\n            return string_bytes_split(array_ops_stack.stack([input]))[0]\n        elif rank == 1:\n            indices, values, shape = gen_string_ops.string_split(input, delimiter='', skip_empty=False)\n            return ragged_tensor.RaggedTensor.from_value_rowids(values=values, value_rowids=indices[:, 0], nrows=shape[0], validate=False)\n        else:\n            return string_bytes_split(ragged_tensor.RaggedTensor.from_tensor(input))",
    "docstring": "Split string elements of into bytes. Examples: >>> tf.strings.bytes_split('hello').numpy() array([b'h', b'e', b'l', b'l', b'o'], dtype=object) >>> tf.strings.bytes_split(['hello', '123']) Note that this op splits strings into bytes, not unicode characters. To split strings into unicode characters, use . See also: , , . Args: input: A string or : the strings to split. Must have a statically known rank (). name: A name for the operation (optional). Returns: A of rank : the bytes that make up the source strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_string_ops.py",
    "ast_data": "FunctionDef name:string_bytes_split arg:input arg:name arguments arg arg With Call Assign Call If Call Return return:yes Call Call Assign If Compare Raise Call If Compare Return return:yes Call Call If Compare Assign Call Return return:yes Call Return return:yes Call Call Call"
  },
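The rank-0 and rank-1 branches above can be exercised directly; this sketch replays the docstring's examples through the public `tf.strings.bytes_split` name:

```python
import tensorflow as tf

# A scalar string yields a flat byte tensor; a vector yields a RaggedTensor.
print(tf.strings.bytes_split('hello').numpy())
# [b'h' b'e' b'l' b'l' b'o']
print(tf.strings.bytes_split(['hello', '123']))
# <tf.RaggedTensor [[b'h', b'e', b'l', b'l', b'o'], [b'1', b'2', b'3']]>
```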
  {
    "library": "tensorflow",
    "name": "_prepare_feed_values",
    "source_code": "def _prepare_feed_values(model, inputs, targets, sample_weights, mode):\n    strategy = model._distribution_strategy\n    inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)\n    if backend.is_tpu_strategy(strategy):\n        if sample_weights is not None:\n            raise ValueError('TPUStrategy does not support sample weights.')\n    if isinstance(inputs, dict):\n        inputs = [inputs[key] for key in model._feed_input_names]\n    if is_distributing_by_cloning(model):\n        inputs = flatten_per_replica_values(strategy, inputs)\n        targets = flatten_per_replica_values(strategy, targets)\n        inputs, targets = nest.map_structure(training_utils_v1.standardize_single_array, (inputs, targets))\n    else:\n        inputs = training_utils_v1.ModelInputs(inputs).as_list()\n    if mode == ModeKeys.PREDICT:\n        sample_weights = []\n        targets = []\n    elif sample_weights is not None and is_distributing_by_cloning(model):\n        if context.executing_eagerly() and (not model._compile_distribution):\n            raise NotImplementedError('`sample_weight` is not supported when using tf.distribute.Strategy in eager mode and cloning=True.')\n        sample_weights = flatten_per_replica_values(strategy, sample_weights)\n    ins = [inputs, targets, sample_weights]\n    return tuple(ins)",
    "docstring": "Prepare feed values to the model execution function. Args: model: Model to prepare feed values for. inputs: List or dict of model inputs. targets: Optional list of model targets. sample_weights: Optional list of sample weight arrays. mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT. Returns: Feed values for the model in the given mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_prepare_feed_values arg:model arg:inputs arg:targets arg:sample_weights arg:mode arguments arg arg arg arg arg Assign Assign Call If Call If Compare Raise Call If Call Assign If Call Assign Call Assign Call Assign Call Assign Call Call If Compare Assign Assign If BoolOp Compare Call If BoolOp Call Raise Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    xp, _ = get_namespace(X)\n    X = validate_data(self, X, copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite='allow-nan', reset=False)\n    X *= self.scale_\n    X += self.min_\n    if self.clip:\n        device_ = device(X)\n        X = _modify_in_place_if_numpy(xp, xp.clip, X, xp.asarray(self.feature_range[0], dtype=X.dtype, device=device_), xp.asarray(self.feature_range[1], dtype=X.dtype, device=device_), out=X)\n    return X",
    "docstring": "Scale features of X according to feature_range. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data that will be transformed. Returns ------- Xt : ndarray of shape (n_samples, n_features) Transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call If Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "check_geom",
    "source_code": "def check_geom(result, func, cargs):\n    if not result:\n        raise GEOSException('Error encountered checking Geometry returned from GEOS C function \"%s\".' % func.__name__)\n    return result",
    "docstring": "Error checking on routines that return Geometries.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\errcheck.py",
    "ast_data": "FunctionDef name:check_geom arg:result arg:func arg:cargs arguments arg arg arg If Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "kaiser_beta",
    "source_code": "def kaiser_beta(a):\n    if a > 50:\n        beta = 0.1102 * (a - 8.7)\n    elif a > 21:\n        beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)\n    else:\n        beta = 0.0\n    return beta",
    "docstring": "Compute the Kaiser parameter , given the attenuation . Parameters ---------- a : float The desired attenuation in the stopband and maximum ripple in the passband, in dB. This should be a *positive* number. Returns ------- beta : float The parameter to be used in the formula for a Kaiser window. References ---------- Oppenheim, Schafer, \"Discrete-Time Signal Processing\", p.475-476. Examples -------- Suppose we want to design a lowpass filter, with 65 dB attenuation in the stop band. The Kaiser window parameter to be used in the window method is computed by ``: >>> from scipy.signal import kaiser_beta >>> kaiser_beta(65) 6.20426",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_fir_filter_design.py",
    "ast_data": "FunctionDef name:kaiser_beta arg:a arguments arg If Compare Assign If Compare Assign Assign Return return:yes"
  },
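A quick check of the `a > 50` branch through the public `scipy.signal.kaiser_beta` export:

```python
from scipy.signal import kaiser_beta

# 65 dB attenuation falls in the a > 50 branch:
# beta = 0.1102 * (65 - 8.7) ~= 6.20426
print(kaiser_beta(65))
```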
  {
    "library": "matplotlib",
    "name": "get_hatch_path",
    "source_code": "def get_hatch_path(self, density=6.0):\n    hatch = self.get_hatch()\n    if hatch is None:\n        return None\n    return Path.hatch(hatch, density)",
    "docstring": "Return a for the current hatch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_hatch_path arg:self arg:density arguments arg arg Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return TEMPLATE_FRAMESET % self.root.lower()",
    "docstring": "Render the coverage stats index page.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_control_dependencies_for_inputs",
    "source_code": "def _control_dependencies_for_inputs(self, input_ops) -> list[Operation]:\n    ret = []\n    for controller in self._control_dependencies_stack:\n        dominated = False\n        for op in input_ops:\n            if controller.op_in_group(op):\n                dominated = True\n                break\n        if not dominated:\n            ret.extend((c for c in controller.control_inputs if c not in input_ops))\n    return ret",
    "docstring": "For an op that takes as inputs, compute control inputs. The returned control dependencies should yield an execution that is equivalent to adding all control inputs in self._control_dependencies_stack to a newly created op. However, this function attempts to prune the returned control dependencies by observing that nodes created within the same block may have data dependencies that make the explicit approach redundant. Args: input_ops: The data input ops for an op to be created. Returns: A list of control inputs for the op to be created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_control_dependencies_for_inputs arg:self arg:input_ops arguments arg arg Assign For Assign For If Call Assign If Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_prod",
    "source_code": "def _prod(lst):\n    return functools.reduce(operator.mul, lst, 1)",
    "docstring": "Returns the product of the numbers in a list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_prod arg:lst arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_cluster_device_filters",
    "source_code": "def _as_cluster_device_filters(self):\n    if self._cluster_device_filters:\n        return self._cluster_device_filters\n    self._make_cluster_device_filters()\n    return self._cluster_device_filters",
    "docstring": "Returns a serialized protobuf of cluster device filters.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:_as_cluster_device_filters arg:self arguments arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "softmax",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef softmax(x, axis=-1):\n    return nn.softmax(x, axis=axis)",
    "docstring": "Softmax of a tensor. Args: x: A tensor or variable. axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:softmax arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cluster_spec, master='', task_type=None, task_id=None, environment='', num_accelerators=None, rpc_layer=None):\n    super(SimpleClusterResolver, self).__init__()\n    self._task_type = task_type\n    self._task_id = task_id\n    self._environment = environment\n    self._num_accelerators = num_accelerators\n    self._rpc_layer = rpc_layer\n    if not isinstance(cluster_spec, ClusterSpec):\n        raise TypeError('cluster_spec must be a `tf.train.ClusterSpec`.')\n    self._cluster_spec = cluster_spec\n    if not isinstance(master, str):\n        raise TypeError('master must be a string.')\n    self._master = master",
    "docstring": "Creates a SimpleClusterResolver from a ClusterSpec.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cluster_spec arg:master arg:task_type arg:task_id arg:environment arg:num_accelerators arg:rpc_layer arguments arg arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign If Call Raise Call Assign If Call Raise Call Assign"
  },
  {
    "library": "django",
    "name": "get_app_list",
    "source_code": "def get_app_list(self, request, app_label=None):\n    app_dict = self._build_app_dict(request, app_label)\n    app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())\n    for app in app_list:\n        app['models'].sort(key=lambda x: x['name'])\n    return app_list",
    "docstring": "Return a sorted list of all the installed apps that have been registered in this site.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:get_app_list arg:self arg:request arg:app_label arguments arg arg arg Assign Call Assign Call Call arguments arg Call For Call arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "createVariable",
    "source_code": "def createVariable(self, name, type, dimensions):\n    shape = tuple([self.dimensions[dim] for dim in dimensions])\n    shape_ = tuple([dim or 0 for dim in shape])\n    type = dtype(type)\n    typecode, size = (type.char, type.itemsize)\n    if (typecode, size) not in REVERSE:\n        raise ValueError(f'NetCDF 3 does not support type {type}')\n    data = empty(shape_, dtype=type.newbyteorder('B'))\n    self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions, maskandscale=self.maskandscale)\n    return self.variables[name]",
    "docstring": "Create an empty variable for the object, specifying its data type and the dimensions it uses. Parameters ---------- name : str Name of the new variable. type : dtype or str Data type of the variable. dimensions : sequence of str List of the dimension names used by the variable, in the desired order. Returns ------- variable : netcdf_variable The newly created `netcdf_filecreateDimension` prior to creating the NetCDF variable.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:createVariable arg:self arg:name arg:type arg:dimensions arguments arg arg arg arg Assign Call Assign Call BoolOp Assign Call Assign If Compare Raise Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_metrics_markdown",
    "source_code": "def write_metrics_markdown(out: TextIO, metrics: list[list[str]]):\n    name_width = max((len(m[0]) for m in metrics))\n    value_width = max(max((len(m[1]) for m in metrics)), len('value'))\n    unit_width = max(max((len(m[2]) for m in metrics)), len('unit'))\n    out.write(f'{'Metric'.ljust(name_width)} | {'Value'.rjust(value_width)} | Unit\\n')\n    out.write('-' * name_width)\n    out.write('-|-')\n    out.write('-' * value_width)\n    out.write('-|-')\n    out.write('-' * unit_width + '\\n')\n    for name, value, unit in metrics:\n        out.write(f'{name.ljust(name_width)} | {value.rjust(value_width)} | {unit}\\n')",
    "docstring": "Formats metrics in markdown.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\backends\\gpu\\codegen\\tools\\ncu_rep_lib.py",
    "ast_data": "FunctionDef name:write_metrics_markdown arg:out arg:metrics arguments arg arg Assign Call Call Assign Call Call Call Call Assign Call Call Call Call Call Call Call Call Call Call Call Call For Call Call Call"
  },
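A usage sketch for the formatter above. The rows-of-`[name, value, unit]` shape follows its indexing; the metric names and values are made up, and `write_metrics_markdown` is assumed to be in scope since `ncu_rep_lib` is an internal tool module:

```python
import sys

metrics = [
    ["gpu__time_duration.sum", "1.25", "ms"],
    ["dram__bytes_read.sum", "4096", "byte"],
]
# Writes a three-column markdown table with padded columns to stdout.
write_metrics_markdown(sys.stdout, metrics)
```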
  {
    "library": "pytorch",
    "name": "clear_user_hooks",
    "source_code": "def clear_user_hooks(self):\n    self._user_pre_fw_hook = None\n    self._user_post_fw_hook = None\n    self._user_pre_bw_hook = None\n    self._user_post_bw_hook = None",
    "docstring": "Clears the user specified hooks registered with ``",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\mod_tracker.py",
    "ast_data": "FunctionDef name:clear_user_hooks arg:self arguments arg Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "avoid_wrapping",
    "source_code": "def avoid_wrapping(value):\n    return value.replace(' ', '\\xa0')",
    "docstring": "Avoid text wrapping in the middle of a phrase by adding non-breaking spaces where there previously were normal spaces.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:avoid_wrapping arg:value arguments arg Return return:yes Call"
  },
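A minimal check via the public `django.utils.html.avoid_wrapping`; every ordinary space becomes U+00A0, so the phrase renders the same but cannot wrap:

```python
from django.utils.html import avoid_wrapping

assert avoid_wrapping("4 days, 2 hours") == "4\xa0days,\xa02\xa0hours"
```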
  {
    "library": "pytorch",
    "name": "_all_gather_base_input",
    "source_code": "def _all_gather_base_input(input, pg):\n    gather_inp_size = list(input.size())\n    gather_inp_size[0] = input.size(0) * dist.get_world_size(pg)\n    gather_inp = torch.empty(gather_inp_size, device=input.device, dtype=input.dtype)\n    return _all_gather_base(gather_inp, input, group=pg)",
    "docstring": "Use _all_gather_base to get a concatenated input from each rank. Args: input: tensor to be applied op on. pg: process group. Returns: gathered_inputs: input gathered from each rank and concat by dim 0.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\_common.py",
    "ast_data": "FunctionDef name:_all_gather_base_input arg:input arg:pg arguments arg arg Assign Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "deprecated",
    "source_code": "def deprecated(use_instead: Any=None) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:\n\n    def deco(func: Callable[_P, _T]) -> Callable[_P, _T]:\n\n        @wraps(func)\n        def wrapped(*args: _P.args, **kwargs: _P.kwargs) -> Any:\n            message = f'Call to deprecated function {func.__name__}.'\n            if use_instead:\n                message += f' Use {use_instead} instead.'\n            warnings.warn(message, category=ScrapyDeprecationWarning, stacklevel=2)\n            return func(*args, **kwargs)\n        return wrapped\n    if callable(use_instead):\n        deco = deco(use_instead)\n        use_instead = None\n    return deco",
    "docstring": "This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\decorators.py",
    "ast_data": "FunctionDef name:deprecated arg:use_instead arguments arg FunctionDef name:deco arg:func arguments arg FunctionDef name:wrapped arguments arg arg Assign If Call Return return:yes Call Call Return return:yes If Call Assign Call Assign Return return:yes"
  },
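A sketch of both decoration styles the implementation above supports; the function names are illustrative:

```python
import warnings

from scrapy.utils.decorators import deprecated

@deprecated(use_instead="parse_page()")
def parse_old():
    return "ok"

# The `callable(use_instead)` branch also allows bare `@deprecated` usage.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    parse_old()
print(caught[0].message)
# Call to deprecated function parse_old. Use parse_page() instead.
```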
  {
    "library": "pytorch",
    "name": "_cast_buffers_to_dtype_and_device",
    "source_code": "def _cast_buffers_to_dtype_and_device(buffers: list[torch.Tensor], buffer_dtypes: list[Optional[torch.dtype]], device: torch.device) -> None:\n    _p_assert(buffer_dtypes is None or len(buffers) == len(buffer_dtypes), f'Expects `buffers` and `buffer_dtypes` to have the same length if `buffer_dtypes` is specified but got {len(buffers)} and {len(buffer_dtypes)}')\n    for buffer, buffer_dtype in zip(buffers, buffer_dtypes):\n        if not torch.is_floating_point(buffer) or buffer_dtype is None:\n            buffer.data = buffer.to(device=device)\n        else:\n            buffer.data = buffer.to(device=device, dtype=buffer_dtype)",
    "docstring": "Casts ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_cast_buffers_to_dtype_and_device arg:buffers arg:buffer_dtypes arg:device arguments arg arg arg Call BoolOp Compare Compare Call Call Call Call For Call If BoolOp Call Compare Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "ber_zeros",
    "source_code": "def ber_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 1)",
    "docstring": "Compute nt zeros of the Kelvin function ber. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the Kelvin function. See Also -------- ber References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:ber_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
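A minimal call through the public `scipy.special.ber_zeros` export:

```python
from scipy.special import ber_zeros

# First three zeros of the Kelvin function ber(x).
print(ber_zeros(3))
```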
  {
    "library": "pytorch",
    "name": "register_frexp",
    "source_code": "def register_frexp():\n    name = 'frexp'\n    frexp = ops_wrapper('frexp')\n\n    def frexp0(*args, **kwargs):\n        return frexp(*args, **kwargs)[0]\n\n    def frexp1(*args, **kwargs):\n        return frexp(*args, **kwargs)[1]\n    pw_fns = [make_pointwise(frexp0), make_pointwise(frexp1, override_return_dtype=torch.int32)]\n\n    def fn(*args, **kwargs):\n        return (pw_fns[0](*args, **kwargs), pw_fns[1](*args, **kwargs))\n    fn = register_lowering(aten.frexp)(fn)\n    if hasattr(prims, name):\n        register_lowering(getattr(prims, name), type_promotion_kind=None)(fn)\n    return fn",
    "docstring": "A pointwise function that maps ops.frexp to inputs",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:register_frexp arguments Assign Assign Call FunctionDef name:frexp0 arguments arg arg Return return:yes Call FunctionDef name:frexp1 arguments arg arg Return return:yes Call Assign Call Call FunctionDef name:fn arguments arg arg Return return:yes Call Call Assign Call Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_reindex_and_concat",
    "source_code": "@final\ndef _reindex_and_concat(self, join_index: Index, left_indexer: npt.NDArray[np.intp] | None, right_indexer: npt.NDArray[np.intp] | None) -> DataFrame:\n    left = self.left[:]\n    right = self.right[:]\n    llabels, rlabels = _items_overlap_with_suffix(self.left._info_axis, self.right._info_axis, self.suffixes)\n    if left_indexer is not None and (not is_range_indexer(left_indexer, len(left))):\n        lmgr = left._mgr.reindex_indexer(join_index, left_indexer, axis=1, only_slice=True, allow_dups=True, use_na_proxy=True)\n        left = left._constructor_from_mgr(lmgr, axes=lmgr.axes)\n    left.index = join_index\n    if right_indexer is not None and (not is_range_indexer(right_indexer, len(right))):\n        rmgr = right._mgr.reindex_indexer(join_index, right_indexer, axis=1, only_slice=True, allow_dups=True, use_na_proxy=True)\n        right = right._constructor_from_mgr(rmgr, axes=rmgr.axes)\n    right.index = join_index\n    from pandas import concat\n    left.columns = llabels\n    right.columns = rlabels\n    result = concat([left, right], axis=1)\n    return result",
    "docstring": "reindex along index and concat along columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_reindex_and_concat arg:self arg:join_index arg:left_indexer arg:right_indexer arguments arg arg arg arg Assign Assign Assign Call If BoolOp Compare Call Call Assign Call Assign Call Assign If BoolOp Compare Call Call Assign Call Assign Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "FxCompile",
    "source_code": "class FxCompile(ABC):\n    _compile_stats: dict[type[FxCompile], _FxCompileStat] = defaultdict(_FxCompileStat)\n\n    @abstractmethod\n    def codegen_and_compile(self, gm: GraphModule, example_inputs: Sequence[InputType], inputs_to_check: Sequence[int], graph_kwargs: _CompileFxKwargs) -> OutputCode:\n        ...\n\n    @classmethod\n    def _reset_stats(cls) -> None:\n        cls._compile_stats.clear()",
    "docstring": "An FxCompile represents a mechanism that can turn a GraphModule into an OutputCode.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx.py",
    "ast_data": "ClassDef name:FxCompile Call FunctionDef name:codegen_and_compile arg:self arg:gm arg:example_inputs arg:inputs_to_check arg:graph_kwargs arguments arg arg arg arg arg FunctionDef name:_reset_stats arg:cls arguments arg Call"
  },
  {
    "library": "cherrypy",
    "name": "unsubscribe",
    "source_code": "def unsubscribe(self, channel, callback):\n    listeners = self.listeners.get(channel)\n    if listeners and callback in listeners:\n        listeners.discard(callback)\n        del self._priorities[channel, callback]",
    "docstring": "Discard the given callback (if present).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:unsubscribe arg:self arg:channel arg:callback arguments arg arg arg Assign Call If BoolOp Compare Call"
  },
  {
    "library": "pandas",
    "name": "validate_minmax_axis",
    "source_code": "def validate_minmax_axis(axis: AxisInt | None, ndim: int=1) -> None:\n    if axis is None:\n        return\n    if axis >= ndim or (axis < 0 and ndim + axis < 0):\n        raise ValueError(f'`axis` must be fewer than the number of dimensions ({ndim})')",
    "docstring": "Ensure that the axis argument passed to min, max, argmin, or argmax is zero or None, as otherwise it will be incorrectly ignored. Parameters ---------- axis : int or None ndim : int, default 1 Raises ------ ValueError",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\numpy\\function.py",
    "ast_data": "FunctionDef name:validate_minmax_axis arg:axis arg:ndim arguments arg arg If Compare Return return:no If BoolOp Compare BoolOp Compare Compare Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **kwargs):\n    response = cherrypy.serving.response\n    self.body = self.oldhandler(*args, **kwargs)\n    self.body = prepare_iter(self.body)\n    ct = response.headers.elements('Content-Type')\n    if self.debug:\n        cherrypy.log('Content-Type: %r' % [str(h) for h in ct], 'TOOLS.ENCODE')\n    if ct and self.add_charset:\n        ct = ct[0]\n        if self.text_only:\n            if ct.value.lower().startswith('text/'):\n                if self.debug:\n                    cherrypy.log('Content-Type %s starts with \"text/\"' % ct, 'TOOLS.ENCODE')\n                do_find = True\n            else:\n                if self.debug:\n                    cherrypy.log('Not finding because Content-Type %s does not start with \"text/\"' % ct, 'TOOLS.ENCODE')\n                do_find = False\n        else:\n            if self.debug:\n                cherrypy.log('Finding because not text_only', 'TOOLS.ENCODE')\n            do_find = True\n        if do_find:\n            ct.params['charset'] = self.find_acceptable_charset()\n            if self.debug:\n                cherrypy.log('Setting Content-Type %s' % ct, 'TOOLS.ENCODE')\n            response.headers['Content-Type'] = str(ct)\n    return self.body",
    "docstring": "Set up encoding for the HTTP response.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg Assign Assign Call Assign Call Assign Call If Call Call If BoolOp Assign If If Call Call If Call Assign If Call Assign If Call Assign If Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "as1Dbatch",
    "source_code": "def as1Dbatch(tensor):\n    while tensor.ndim < 3:\n        tensor = tensor.unsqueeze(0)\n    if tensor.ndim > 3:\n        tensor = tensor.flatten(0, tensor.ndim - 3)\n    assert tensor.ndim == 3, tensor.shape\n    return tensor",
    "docstring": "Return tensor as 3D tensor by either prepending new dimensions to the tensor shape (when ``).",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\_triton_ops.py",
    "ast_data": "FunctionDef name:as1Dbatch arg:tensor arguments arg While Compare Assign Call If Compare Assign Call Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_cpu0",
    "source_code": "def set_cpu0(device_string):\n    if context.is_custom_device(device_string):\n        return device_string\n    parsed_device = pydev.DeviceSpec.from_string(device_string)\n    parsed_device = parsed_device.replace(device_type='CPU', device_index=0)\n    return parsed_device.to_string()",
    "docstring": "Creates a new device string based on but using /CPU:0. If the device is already on /CPU:0 or it is a custom device, this is a no-op. Args: device_string: A device string. Returns: A device string.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:set_cpu0 arg:device_string arguments arg If Call Return return:yes Assign Call Assign Call Return return:yes Call"
  },
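A sketch of the rewrite this helper performs; `set_cpu0` is internal to `saveable_object_util`, so this assumes it is in scope:

```python
# Any non-custom device string is pinned to the host's first CPU,
# keeping the job/replica/task fields intact.
print(set_cpu0("/job:worker/replica:0/task:1/device:GPU:3"))
# /job:worker/replica:0/task:1/device:CPU:0
```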
  {
    "library": "kornia",
    "name": "DropPath",
    "source_code": "class DropPath(Module):\n\n    def __init__(self, drop_prob: Optional[float]=None) -> None:\n        super().__init__()\n        self.drop_prob = drop_prob\n\n    def forward(self, x: Tensor) -> Tensor:\n        return drop_path(x, self.drop_prob, self.training)",
    "docstring": "Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\dedode\\transformer\\layers\\drop_path.py",
    "ast_data": "ClassDef name:DropPath FunctionDef name:__init__ arg:self arg:drop_prob arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, loss_tensor, fail_on_nan_loss=True):\n    self._loss_tensor = loss_tensor\n    self._fail_on_nan_loss = fail_on_nan_loss",
    "docstring": "Initializes a . Args: loss_tensor: , the loss tensor. fail_on_nan_loss: , whether to raise exception when loss is NaN.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:loss_tensor arg:fail_on_nan_loss arguments arg arg arg Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "all_functions",
    "source_code": "def all_functions():\n    from ._testing import ignore_warnings\n    all_functions = []\n    root = str(Path(__file__).parent.parent)\n    with ignore_warnings(category=FutureWarning):\n        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix='sklearn.'):\n            module_parts = module_name.split('.')\n            if any((part in _MODULE_TO_IGNORE for part in module_parts)) or '._' in module_name:\n                continue\n            module = import_module(module_name)\n            functions = inspect.getmembers(module, _is_checked_function)\n            functions = [(func.__name__, func) for name, func in functions if not name.startswith('_')]\n            all_functions.extend(functions)\n    return sorted(set(all_functions), key=itemgetter(0))",
    "docstring": "Get a list of all functions from . Returns ------- functions : list of tuples List of (name, function), where `` is the actual function. Examples -------- >>> from sklearn.utils.discovery import all_functions >>> functions = all_functions() >>> name, function = functions[0] >>> name 'accuracy_score'",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\discovery.py",
    "ast_data": "FunctionDef name:all_functions arguments Assign Assign Call Call With Call For Call Assign Call If BoolOp Call Compare Compare Assign Call Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "MoveModuleTargets",
    "source_code": "class MoveModuleTargets(SphinxTransform):\n    default_priority = 210\n\n    def apply(self, **kwargs: Any) -> None:\n        for node in list(self.document.findall(nodes.target)):\n            if not node['ids']:\n                continue\n            if 'ismod' in node and type(node.parent) is nodes.section and (node.parent.index(node) == 2):\n                node.parent['ids'][0:0] = node['ids']\n                node.parent.remove(node)",
    "docstring": "Move module targets that are the first thing in a section to the section title. XXX Python specific",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:MoveModuleTargets Assign FunctionDef name:apply arg:self arguments arg arg For Call Call If If BoolOp Compare Compare Call Compare Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_code",
    "source_code": "def _get_code(input_file):\n    raw_code = []\n    with open(input_file) as in_file:\n        notebook = json.load(in_file)\n    cell_index = 0\n    for cell in notebook['cells']:\n        if is_python(cell):\n            cell_lines = cell['source']\n            is_line_split = False\n            for line_idx, code_line in enumerate(cell_lines):\n                if skip_magic(code_line, ['%', '!', '?']) or is_line_split:\n                    code_line = '###!!!' + code_line\n                    is_line_split = check_line_split(code_line)\n                if is_line_split:\n                    is_line_split = check_line_split(code_line)\n                if line_idx == len(cell_lines) - 1 and code_line.endswith('\\n'):\n                    code_line = code_line.replace('\\n', '###===')\n                raw_code.append(CodeLine(cell_index, code_line.rstrip().replace('\\n', '###===')))\n            cell_index += 1\n    return (raw_code, notebook)",
    "docstring": "Loads the ipynb file and returns a list of CodeLines.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ipynb.py",
    "ast_data": "FunctionDef name:_get_code arg:input_file arguments arg Assign With Call Assign Call Assign For If Call Assign Assign For Call If BoolOp Call Assign Assign Call If Assign Call If BoolOp Compare Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@available_if(_final_estimator_has('decision_function'))\ndef decision_function(self, X, **params):\n    with _raise_or_warn_if_not_fitted(self):\n        _raise_for_params(params, self, 'decision_function')\n        routed_params = process_routing(self, 'decision_function', **params)\n        Xt = X\n        for _, name, transform in self._iter(with_final=False):\n            Xt = transform.transform(Xt, **routed_params.get(name, {}).get('transform', {}))\n        return self.steps[-1][1].decision_function(Xt, **routed_params.get(self.steps[-1][0], {}).get('decision_function', {}))",
    "docstring": "Transform the data, and apply with the final estimator. Call of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls method. Only valid if the final estimator implements . Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of string -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if . See :ref: for more details. Returns ------- y_score : ndarray of shape (n_samples, n_classes) Result of calling on the final estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg arg With Call Call Assign Call Assign For Call Assign Call Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_existing_objects_matched",
    "source_code": "@abc.abstractmethod\ndef assert_existing_objects_matched(self):\n    pass",
    "docstring": "Raises an exception unless existing Python objects have been matched.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_existing_objects_matched arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "get_partition_cudagraph_metadata",
    "source_code": "def get_partition_cudagraph_metadata(partition_map: GraphPartitionMap, metadata: CudagraphMetadata) -> CudagraphMetadata:\n    partition_placeholders = []\n    partition_static_input_idxs: OrderedSet[int] = OrderedSet()\n    partition_mutated_input_idxs: OrderedSet[int] = OrderedSet()\n    for partition_input_idx, graph_input_idx in enumerate(partition_map.input_index_mapping):\n        if graph_input_idx in metadata.static_input_idxs:\n            partition_static_input_idxs.add(partition_input_idx)\n        if graph_input_idx in metadata.mutated_input_idxs:\n            partition_mutated_input_idxs.add(partition_input_idx)\n        if graph_input_idx is not None:\n            placeholder = metadata.placeholders[graph_input_idx]\n        else:\n            placeholder = PlaceholderInfo(name=f'partition_{partition_map.id}_placeholder_{partition_input_idx}', stack_trace=None, users=[], mutating_use_stack_trace=None)\n        partition_placeholders.append(placeholder)\n    partition_stack_traces = []\n    for graph_output_idx in partition_map.output_index_mapping:\n        if graph_output_idx is not None:\n            partition_stack_traces.append(metadata.stack_traces[graph_output_idx])\n        else:\n            partition_stack_traces.append(None)\n    partition_constants = {name: metadata.constants[name] for name in partition_map.constant_names}\n    return CudagraphMetadata(partition_placeholders, partition_static_input_idxs, partition_mutated_input_idxs, partition_stack_traces, partition_constants)",
    "docstring": "Convert the cudagraph metadata at the graph level to the graph partition level, given the graph partition info (i.e., mapping from partition input/output index to graph input/output index).",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py",
    "ast_data": "FunctionDef name:get_partition_cudagraph_metadata arg:partition_map arg:metadata arguments arg arg Assign Call Call For Call If Compare Call If Compare Call If Compare Assign Assign Call Call Assign For If Compare Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "git_config_guard",
    "source_code": "@contextlib.contextmanager\ndef git_config_guard(repo: GitRepo) -> Generator[None, None, None]:\n    user_email = repo._run_git('config', 'user.email')\n    user_name = repo._run_git('config', 'user.name')\n    try:\n        yield\n    finally:\n        if user_email:\n            repo._run_git('config', '--global', 'user.email', user_email)\n        if user_name:\n            repo._run_git('config', '--global', 'user.name', user_name)",
    "docstring": "Restores user.name and user.email global properties after context is finished",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\tryrebase.py",
    "ast_data": "FunctionDef name:git_config_guard arg:repo arguments arg Assign Call Assign Call Try If Call If Call"
  },
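A hypothetical usage sketch; `GitRepo` is the helper class from the same scripts directory, and the constructor arguments here are assumptions:

```python
from gitutils import GitRepo  # sibling helper module, assumed importable

repo = GitRepo(".", "origin")
with git_config_guard(repo):
    # Temporarily commit as a bot identity inside the guard...
    repo._run_git("config", "user.email", "bot@example.com")
    repo._run_git("config", "user.name", "Rebase Bot")
# ...and the saved global user.email / user.name are written back on exit.
```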
  {
    "library": "scrapy",
    "name": "sitemap_filter",
    "source_code": "def sitemap_filter(self, entries: Iterable[dict[str, Any]]) -> Iterable[dict[str, Any]]:\n    yield from entries",
    "docstring": "This method can be used to filter sitemap entries by their attributes, for example, you can filter locs with lastmod greater than a given date (see docs).",
    "type": "method",
    "file_path": "scrapy\\scrapy\\spiders\\sitemap.py",
    "ast_data": "FunctionDef name:sitemap_filter arg:self arg:entries arguments arg arg"
  },
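The base implementation above is a pass-through; a subclass can override it to drop entries, as in this sketch that keeps only recently modified locs (URL and cutoff date are illustrative):

```python
from scrapy.spiders import SitemapSpider

class RecentLinksSpider(SitemapSpider):
    name = "recent_links"
    sitemap_urls = ["https://example.com/sitemap.xml"]  # hypothetical sitemap

    def sitemap_filter(self, entries):
        # Keep only entries whose lastmod is 2024-01-01 or later.
        for entry in entries:
            if entry.get("lastmod", "") >= "2024-01-01":
                yield entry
```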
  {
    "library": "tensorflow",
    "name": "outbound_nodes",
    "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef outbound_nodes(self):\n    return self._outbound_nodes",
    "docstring": "Deprecated, do NOT use! Only for compatibility with external Keras.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:outbound_nodes arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_tpu_core_ids",
    "source_code": "def set_tpu_core_ids(self, mesh_name, tpu_core_ids):\n    _pywrap_dtensor_device.SetTPUCoreIDs(self._device_info, mesh_name, tpu_core_ids)",
    "docstring": "Sets the singleton global device ID-to-physical core ID map. Args: mesh_name: The name of a mesh. If empty, set the default mapping. tpu_core_ids: TPU core IDs sorted by TF task/device ordinal.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:set_tpu_core_ids arg:self arg:mesh_name arg:tpu_core_ids arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "load_seed",
    "source_code": "def load_seed(self, name: str, offset: T) -> T:\n    raise NotImplementedError",
    "docstring": "Computes inductor_prims.lookup_seed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:load_seed arg:self arg:name arg:offset arguments arg arg arg Raise"
  },
  {
    "library": "numpy",
    "name": "__dir__",
    "source_code": "def __dir__(self):\n    return list(object.__getattribute__(self, '_obj').keys())",
    "docstring": "Enables dir(bagobj) to list the files in an NpzFile. This also enables tab-completion in an interpreter or IPython.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:__dir__ arg:self arguments arg Return return:yes Call Call Call"
  },
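A short demonstration of the tab-completion behavior this enables; `npz.f` is the `BagObj` whose `__dir__` is shown above:

```python
import numpy as np

np.savez("demo.npz", a=np.arange(3), b=np.eye(2))
npz = np.load("demo.npz")
print(dir(npz.f))   # ['a', 'b'] -- the stored array names
print(npz.f.a)      # [0 1 2]
```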
  {
    "library": "kornia",
    "name": "Lambda",
    "source_code": "class Lambda(Module):\n\n    def __init__(self, fcn: Callable[..., Any]) -> None:\n        super().__init__()\n        self.fcn = fcn\n\n    def forward(self, x: Tensor) -> Any:\n        return self.fcn(x)",
    "docstring": "Module to create a lambda function as Module. Args: fcn: a pointer to any function. Example: >>> import torch >>> import kornia as K >>> fcn = Lambda(lambda x: K.geometry.resize(x, (32, 16))) >>> fcn(torch.rand(1, 4, 64, 32)).shape torch.Size([1, 4, 32, 16])",
    "type": "class",
    "file_path": "kornia\\kornia\\x\\utils.py",
    "ast_data": "ClassDef name:Lambda FunctionDef name:__init__ arg:self arg:fcn arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "output_json",
    "source_code": "def output_json(filename, headers, row):\n    origin = ''\n    if 'torchbench' in filename:\n        origin = 'torchbench'\n    elif 'huggingface' in filename:\n        origin = 'huggingface'\n    elif 'timm_models' in filename:\n        origin = 'timm_models'\n    extra_info = {'device': current_device, 'quantization': current_quantization, 'batch_size': current_batch_size}\n    if current_settings:\n        extra_info.update(current_settings)\n    mapping_headers = {headers[i]: v for i, v in enumerate(row)}\n    with open(f'{os.path.splitext(filename)[0]}.json', 'a') as f:\n        for header, value in mapping_headers.items():\n            if header in ('dev', 'name', 'batch_size'):\n                continue\n            if not current_name:\n                continue\n            record = {'benchmark': {'name': 'TorchInductor', 'mode': current_mode, 'dtype': current_dtype, 'extra_info': extra_info}, 'model': {'name': current_name, 'type': 'OSS model', 'backend': current_backend, 'origins': [origin]}}\n            if isinstance(value, str):\n                record['metric'] = {'name': header, 'extra_info': {'benchmark_values': [value]}}\n            else:\n                record['metric'] = {'name': header, 'benchmark_values': [value]}\n            print(json.dumps(record), file=f)",
    "docstring": "Write the result into JSON format, so that it can be uploaded to the benchmark database to be displayed on OSS dashboard. The JSON format is defined at",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:output_json arg:filename arg:headers arg:row arguments arg arg arg Assign If Compare Assign If Compare Assign If Compare Assign Assign If Call Assign Call With Call Call For Call If Compare If Assign If Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_variables_to_tensors",
    "source_code": "def _convert_variables_to_tensors(self):\n    return self",
    "docstring": "Converts ResourceVariable components to Tensors. Override this method to explicitly convert ResourceVariables embedded in the CompositeTensor to Tensors. By default, it returns the CompositeTensor unchanged. Returns: A CompositeTensor with all its ResourceVariable components converted to Tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor.py",
    "ast_data": "FunctionDef name:_convert_variables_to_tensors arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_int32",
    "source_code": "@tf_export(v1=['to_int32'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_int32(x, name='ToInt32'):\n    return cast(x, dtypes.int32, name=name)",
    "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64)) After: >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32) @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:to_int32 arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, *system, **kwargs):\n    dt = kwargs.pop('dt', True)\n    super().__init__(*system, **kwargs)\n    self.dt = dt",
    "docstring": "Initialize the baseclass. The heavy lifting is done by the subclasses.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "has_record",
    "source_code": "def has_record(self, name: str) -> bool:\n    return name in self.archive_file.get_all_written_records()",
    "docstring": "Check if a record exists in the archive.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:has_record arg:self arg:name arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "_get_queryset",
    "source_code": "def _get_queryset(klass):\n    if hasattr(klass, '_default_manager'):\n        return klass._default_manager.all()\n    return klass",
    "docstring": "Return a QuerySet or a Manager. Duck typing in action: any class with a method (for get_object_or_404) or a method (for get_list_or_404) might do the job.",
    "type": "function",
    "file_path": "django\\django\\shortcuts.py",
    "ast_data": "FunctionDef name:_get_queryset arg:klass arguments arg If Call Return return:yes Call Return return:yes"
  },
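Because of this duck typing, `get_object_or_404` works with a model class, a `Manager`, or a `QuerySet`; `Article` below is a hypothetical model:

```python
from django.shortcuts import get_object_or_404
from myapp.models import Article  # hypothetical app and model

obj = get_object_or_404(Article, pk=1)                                 # model class
obj = get_object_or_404(Article.objects.filter(published=True), pk=1)  # queryset
```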
  {
    "library": "tensorflow",
    "name": "_has_no_variables",
    "source_code": "def _has_no_variables(sess: session.Session) -> bool:\n    for op in sess.graph.get_operations():\n        if op.type.startswith('Variable') or op.type.endswith('VariableOp'):\n            return False\n    return True",
    "docstring": "Determines if the graph has any variables. Args: sess: TensorFlow Session. Returns: Bool.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\freeze_graph.py",
    "ast_data": "FunctionDef name:_has_no_variables arg:sess arguments arg For Call If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "proj",
    "source_code": "@property\ndef proj(self):\n    return capi.to_proj(self.ptr, byref(c_char_p()))",
    "docstring": "Return the PROJ representation for this Spatial Reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:proj arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_traced_op_names",
    "source_code": "def get_traced_op_names(self):\n    return self._traced_op_names",
    "docstring": "Returns the set of traced op names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:get_traced_op_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "AddConstraintNotValid",
    "source_code": "class AddConstraintNotValid(AddConstraint):\n    category = OperationCategory.ADDITION\n\n    def __init__(self, model_name, constraint):\n        if not isinstance(constraint, CheckConstraint):\n            raise TypeError('AddConstraintNotValid.constraint must be a check constraint.')\n        super().__init__(model_name, constraint)\n\n    def describe(self):\n        return 'Create not valid constraint %s on model %s' % (self.constraint.name, self.model_name)\n\n    def database_forwards(self, app_label, schema_editor, from_state, to_state):\n        model = from_state.apps.get_model(app_label, self.model_name)\n        if self.allow_migrate_model(schema_editor.connection.alias, model):\n            constraint_sql = self.constraint.create_sql(model, schema_editor)\n            if constraint_sql:\n                schema_editor.execute(str(constraint_sql) + ' NOT VALID', params=None)\n\n    @property\n    def migration_name_fragment(self):\n        return super().migration_name_fragment + '_not_valid'",
    "docstring": "Add a table constraint without enforcing validation, using PostgreSQL's NOT VALID syntax.",
    "type": "class",
    "file_path": "django\\django\\contrib\\postgres\\operations.py",
    "ast_data": "ClassDef name:AddConstraintNotValid Assign FunctionDef name:__init__ arg:self arg:model_name arg:constraint arguments arg arg arg If Call Raise Call Call Call FunctionDef name:describe arg:self arguments arg Return return:yes FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Assign Call If Call Assign Call If Call Call FunctionDef name:migration_name_fragment arg:self arguments arg Return return:yes Call"
  },
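A sketch of the operation in a hand-written migration (PostgreSQL only); the app, model, and constraint names are made up, and on Django versions before 5.1 the `CheckConstraint` keyword is `check=` rather than `condition=`:

```python
from django.contrib.postgres.operations import AddConstraintNotValid
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [("shop", "0001_initial")]
    operations = [
        # Adds the CHECK constraint with NOT VALID: existing rows are not
        # scanned; only new and updated rows are checked until validated.
        AddConstraintNotValid(
            model_name="order",
            constraint=models.CheckConstraint(
                condition=models.Q(amount__gte=0),
                name="order_amount_nonnegative",
            ),
        ),
    ]
```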
  {
    "library": "pandas",
    "name": "astype",
    "source_code": "def astype(self, dtype: Dtype, copy: bool=True):\n    if dtype is not None:\n        dtype = pandas_dtype(dtype)\n    if self.dtype == dtype:\n        return self.copy() if copy else self\n    values = self._data\n    if isinstance(values, ExtensionArray):\n        with rewrite_exception(type(values).__name__, type(self).__name__):\n            new_values = values.astype(dtype, copy=copy)\n    elif isinstance(dtype, ExtensionDtype):\n        cls = dtype.construct_array_type()\n        new_values = cls._from_sequence(self, dtype=dtype, copy=copy)\n    else:\n        new_values = astype_array(values, dtype=dtype, copy=copy)\n    result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False)\n    if not copy and self._references is not None and astype_is_view(self.dtype, dtype):\n        result._references = self._references\n        result._references.add_index_reference(result)\n    return result",
    "docstring": "Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer is treated as `dtype`, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. See Also -------- Index.dtype: Return the dtype object of the underlying data. Index.dtypes: Return the dtype object of the underlying data. Index.convert_dtypes: Convert columns to the best possible dtypes. Examples -------- >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.astype(\"float\") Index([1.0, 2.0, 3.0], dtype='float64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:copy arguments arg arg arg If Compare Assign Call If Compare Return return:yes Call Assign If Call With Call Call Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "set_table_styles",
    "source_code": "def set_table_styles(self, table_styles: dict[Any, CSSStyles] | CSSStyles | None=None, axis: AxisInt=0, overwrite: bool=True, css_class_names: dict[str, str] | None=None) -> Styler:\n    if css_class_names is not None:\n        self.css = {**self.css, **css_class_names}\n    if table_styles is None:\n        return self\n    elif isinstance(table_styles, dict):\n        axis = self.data._get_axis_number(axis)\n        obj = self.data.index if axis == 1 else self.data.columns\n        idf = f'.{self.css['row']}' if axis == 1 else f'.{self.css['col']}'\n        table_styles = [{'selector': str(s['selector']) + idf + str(idx), 'props': maybe_convert_css_to_tuples(s['props'])} for key, styles in table_styles.items() for idx in obj.get_indexer_for([key]) for s in format_table_styles(styles)]\n    else:\n        table_styles = [{'selector': s['selector'], 'props': maybe_convert_css_to_tuples(s['props'])} for s in table_styles]\n    if not overwrite and self.table_styles is not None:\n        self.table_styles.extend(table_styles)\n    else:\n        self.table_styles = table_styles\n    return self",
    "docstring": "Set the table styles included within the ``Table Visualization `_ user guide for more details.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:set_table_styles arg:self arg:table_styles arg:axis arg:overwrite arg:css_class_names arguments arg arg arg arg arg If Compare Assign If Compare Return return:yes If Call Assign Call Assign Compare Assign Compare Assign Call Call Call Call Call Call Assign Call If BoolOp Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_ratio_right",
    "source_code": "def load_ratio_right(M: int, N: int, O: int, P: int, m: int, n: int, o: int, p: int) -> float:\n    base = N * O + O * P + M * N + N * P\n    gemm = ceildiv(M, m) * ceildiv(P, p) * ceildiv(N, n) * (m * n + ceildiv(O, o) * (n * o + o * p))\n    return base / gemm",
    "docstring": "compute the ratio of estimated numbers of loads in baseline and b2bgemm M, N, O, P are matrix sizes m, n, o, p are block sizes | | baseline (lower bound) | b2bgemm | load | N * O + O * P + M * N + N * P | M / m * P / p * N / n * (m * n + O / o * (n * o + o * p)) | store | N * P + M * P | M * P b2bgemm is always better on stores, but for loads we need to find out beneficial cases using this function",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py",
    "ast_data": "FunctionDef name:load_ratio_right arg:M arg:N arg:O arg:P arg:m arg:n arg:o arg:p arguments arg arg arg arg arg arg arg arg Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "emit_structseq_call",
    "source_code": "def emit_structseq_call(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> tuple[list[str], dict[str, str]]:\n    typenames: dict[str, str] = {}\n    typedefs: list[str] = []\n    for overload in overloads:\n        fieldnames = structseq_fieldnames(overload.function.func.returns)\n        if not fieldnames:\n            continue\n        name = cpp.name(overload.function.func)\n        tn_key = gen_structseq_typename_key(overload.function)\n        typename = typenames.get(tn_key)\n        if typename is None:\n            typename = f'NamedTuple{('' if not typedefs else len(typedefs))}'\n            typenames[tn_key] = typename\n            typedefs.append(f'static PyTypeObject* {typename} = generated::get_{name}_structseq();')\n    return (typedefs, typenames)",
    "docstring": "Generate block of named tuple type def inits, and add typeref snippets to declarations that use them",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:emit_structseq_call arg:overloads arguments arg For Assign Call If Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "device_count",
    "source_code": "def device_count() -> int:\n    global _cached_device_count\n    if not _is_compiled():\n        return 0\n    if _cached_device_count is not None:\n        return _cached_device_count\n    nvml_count = _device_count_amdsmi() if torch.version.hip else _device_count_nvml()\n    r = torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count\n    if _initialized:\n        _cached_device_count = r\n    return r",
    "docstring": "Return the number of GPUs available. .. note:: This API will NOT posion fork if NVML discovery succeeds. See :ref: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:device_count arguments If Call Return return:yes If Compare Return return:yes Assign Call Call Assign Compare Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "avg_pool2d",
    "source_code": "def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.avg_pool2d' must be quantized!\")\n    return torch.nn.functional.avg_pool2d(input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)",
    "docstring": "Applies 2D average-pooling operation in :math: regions by step size :math: steps. The number of output features is equal to the number of input planes. .. note:: The input quantization parameters propagate to the output. See :class: for details and output shape. Args: input: quantized input tensor :math: kernel_size: size of the pooling region. Can be a single number or a tuple stride: stride of the pooling operation. Can be a single number or a tuple . Default: :attr: padding: implicit zero paddings on both sides of the input. Can be a single number or a tuple . Default: 0 ceil_mode: when True, will use instead of in the formula to compute the output shape. Default: `` divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. Default: None",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:avg_pool2d arg:input arg:kernel_size arg:stride arg:padding arg:ceil_mode arg:count_include_pad arg:divisor_override arguments arg arg arg arg arg arg arg If Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "time_spherical_polygon_vertex_sorting",
    "source_code": "def time_spherical_polygon_vertex_sorting(self, num_points):\n    self.sv.sort_vertices_of_regions()",
    "docstring": "Time the vertex sorting operation in the Spherical Voronoi code.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_spherical_polygon_vertex_sorting arg:self arg:num_points arguments arg arg Call"
  },
  {
    "library": "cryptography",
    "name": "private_numbers",
    "source_code": "@abc.abstractmethod\ndef private_numbers(self) -> RSAPrivateNumbers:\n    pass",
    "docstring": "Returns an RSAPrivateNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:private_numbers arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_LinearOperatorGradient",
    "source_code": "class _LinearOperatorGradient(composite_tensor_gradient.CompositeTensorGradient):\n\n    def get_gradient_components(self, value):\n        return value._type_spec._to_components(value)\n\n    def replace_gradient_components(self, value, components):\n        flat_components = nest.flatten(components)\n        if all((c is None for c in flat_components)):\n            return None\n        value_components = value._type_spec._to_components(value)\n        flat_grad_components = []\n        for gc, vc in zip(flat_components, nest.flatten(value_components)):\n            if gc is None:\n                flat_grad_components.append(nest.map_structure(lambda x: array_ops.zeros_like(x, dtype=value.dtype), vc, expand_composites=True))\n            else:\n                flat_grad_components.append(gc)\n        grad_components = nest.pack_sequence_as(value_components, flat_grad_components)\n        return value._type_spec._from_components(grad_components)",
    "docstring": "Composite tensor gradient for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "ClassDef name:_LinearOperatorGradient FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:replace_gradient_components arg:self arg:value arg:components arguments arg arg arg Assign Call If Call Compare Return return:no Assign Call Assign For Call Call If Compare Call Call arguments arg Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_dense",
    "source_code": "def to_dense(self) -> Tensor:\n    partial_dense = _ordered_to_dense(self.kv_num_blocks, self.kv_indices)\n    if self.full_kv_num_blocks is not None:\n        assert self.full_kv_indices is not None\n        return partial_dense | _ordered_to_dense(self.full_kv_num_blocks, self.full_kv_indices)\n    return partial_dense",
    "docstring": "Returns a dense block that is equivalent to the block mask.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py",
    "ast_data": "FunctionDef name:to_dense arg:self arguments arg Assign Call If Compare Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    proba = self.predict_proba(X)\n    if self.n_outputs_ == 1:\n        return np.log(proba)\n    else:\n        for k in range(self.n_outputs_):\n            proba[k] = np.log(proba[k])\n        return proba",
    "docstring": "Predict class log-probabilities of the input samples X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to `classes_`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call If Compare Return return:yes Call For Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_service_documentation",
    "source_code": "def validate_service_documentation(self):\n    value = self.get('service_documentation')\n    if value and (not is_valid_url(value)):\n        raise ValueError('\"service_documentation\" MUST be a URL')",
    "docstring": "OPTIONAL. URL of a page containing human-readable information that developers might want or need to know when using the authorization server. In particular, if the authorization server does not support Dynamic Client Registration, then information on how to register clients needs to be provided in this documentation.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_service_documentation arg:self arguments arg Assign Call If BoolOp Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    state = self.__dict__\n    if MapDataPipe.getstate_hook is not None:\n        return MapDataPipe.getstate_hook(state)\n    return state",
    "docstring": "Serialize functions when is available. If this doesn't cover your custom DataPipe's use case, consider writing custom methods for and , or use for serialization.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\datapipe.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xy",
    "source_code": "def get_xy(self):\n    return (self._x0, self._y0)",
    "docstring": "Return the left and bottom coords of the rectangle as a tuple.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_xy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "start_in_process_as",
    "source_code": "def start_in_process_as(self, as_task_type, as_task_id):\n    if self._processes:\n        raise ValueError('MultiProcessRunner already started.')\n    with self._process_lock:\n        if self._joined:\n            raise ValueError('cannot start new processes afterMultiProcessRunner.join() is called')\n        for task_type, addresses in self._cluster_spec.items():\n            for task_id, _ in enumerate(addresses):\n                if not (task_type == as_task_type and task_id == as_task_id):\n                    self._start_subprocess_and_reading_thread(task_type, task_id)\n    _set_tf_config(as_task_type, as_task_id, self._cluster_spec, self._rpc_layer)\n    self._fn(*self._args, **self._kwargs)",
    "docstring": "Start the processes, with the specified task run in main process. This is similar to except that the task with task_type and task_id is run in the main process. This method is particularly useful when debugging tool such as is needed in some specific task. Note that since this method is blocking until that specific task exits, additional actions would need a thread to be called: Note that if , the logs/stdout by task run by the main process is not available in result.stdout. Args: as_task_type: The task type to be run in the main process. as_task_id: The task id to be run in the main process.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:start_in_process_as arg:self arg:as_task_type arg:as_task_id arguments arg arg arg If Raise Call With If Raise Call For Call For Call If BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "manual",
    "source_code": "@classmethod\ndef manual(cls):\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.MANUAL))",
    "docstring": "Returns a manuall sharding attribute. This means the op is manually partitioned by the user and XLA will not change the shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:manual arg:cls arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_op_maker",
    "source_code": "def _op_maker(op_class, op_symbol):\n\n    def f(self, node, *args, **kwargs):\n        return partial(op_class, op_symbol, *args, **kwargs)\n    return f",
    "docstring": "Return a function to create an op class with its symbol already passed. Returns ------- callable",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_op_maker arg:op_class arg:op_symbol arguments arg arg FunctionDef name:f arg:self arg:node arguments arg arg arg arg Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_checkpoint_values_check",
    "source_code": "def add_checkpoint_values_check(object_graph_proto):\n    parents = {}\n    checkpointed_trackables = object_identity.ObjectIdentitySet()\n    checkpointed_trackables = set()\n    for node_id, object_proto in enumerate(object_graph_proto.nodes):\n        if object_proto.attributes or object_proto.slot_variables or object_proto.HasField('registered_saver'):\n            checkpointed_trackables.add(node_id)\n        for child_proto in object_proto.children:\n            child = child_proto.node_id\n            if child not in parents:\n                parents[child] = set()\n            parents[child].add(node_id)\n    to_visit = set()\n    to_visit.update(checkpointed_trackables)\n    while to_visit:\n        trackable = to_visit.pop()\n        if trackable not in parents:\n            continue\n        current_parents = parents.pop(trackable)\n        checkpointed_trackables.update(current_parents)\n        for parent in current_parents:\n            if parent in parents:\n                to_visit.add(parent)\n    for node_id, object_proto in enumerate(object_graph_proto.nodes):\n        object_proto.has_checkpoint_values.value = bool(node_id in checkpointed_trackables)",
    "docstring": "Determines which objects have checkpoint values and save this to the proto. Args: object_graph_proto: A proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\util.py",
    "ast_data": "FunctionDef name:add_checkpoint_values_check arg:object_graph_proto arguments arg Assign Assign Call Assign Call For Call If BoolOp Call Call For Assign If Compare Assign Call Call Assign Call Call While Assign Call If Compare Assign Call Call For If Compare Call For Call Assign Call Compare"
  },
  {
    "library": "pandas",
    "name": "nlargest",
    "source_code": "def nlargest(self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep='first') -> DataFrame:\n    return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()",
    "docstring": "Return the first rows ordered by in descending order. Return the first rows with the largest values in , in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to `nncolumnsnobjectcategory` distinct largest elements: >>> df.nlargest(5, \"population\", keep=\"all\") population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column \"population\" and then \"GDP\", we can specify multiple columns like in the next example. >>> df.nlargest(3, [\"population\", \"GDP\"]) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:nlargest arg:self arg:n arg:columns arg:keep arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "iter_encode",
    "source_code": "def iter_encode(self, obj):\n    if obj.get('description', None):\n        for row in obj['description'].split('\\n'):\n            yield self._encode_comment(row)\n    if not obj.get('relation'):\n        raise BadObject('Relation name not found or with invalid value.')\n    yield self._encode_relation(obj['relation'])\n    yield ''\n    if not obj.get('attributes'):\n        raise BadObject('Attributes not found.')\n    attribute_names = set()\n    for attr in obj['attributes']:\n        if not isinstance(attr, (tuple, list)) or len(attr) != 2 or (not isinstance(attr[0], str)):\n            raise BadObject('Invalid attribute declaration \"%s\"' % str(attr))\n        if isinstance(attr[1], str):\n            if attr[1] not in _SIMPLE_TYPES:\n                raise BadObject('Invalid attribute type \"%s\"' % str(attr))\n        elif not isinstance(attr[1], (tuple, list)):\n            raise BadObject('Invalid attribute type \"%s\"' % str(attr))\n        if attr[0] in attribute_names:\n            raise BadObject('Trying to use attribute name \"%s\" for the second time.' % str(attr[0]))\n        else:\n            attribute_names.add(attr[0])\n        yield self._encode_attribute(attr[0], attr[1])\n    yield ''\n    attributes = obj['attributes']\n    yield _TK_DATA\n    if 'data' in obj:\n        data = _get_data_object_for_encoding(obj.get('data'))\n        yield from data.encode_data(obj.get('data'), attributes)\n    yield ''",
    "docstring": "The iterative version of . This encodes iteratively a given object and return, one-by-one, the lines of the ARFF file. :param obj: the object containing the ARFF information. :return: (yields) the ARFF file as strings.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "FunctionDef name:iter_encode arg:self arg:obj arguments arg arg If Call For Call Call If Call Raise Call Call If Call Raise Call Assign Call For If BoolOp Call Compare Call Call Raise Call Call If Call If Compare Raise Call Call If Call Raise Call Call If Compare Raise Call Call Call Call Assign If Compare Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__eq__",
    "source_code": "def __eq__(self, other):\n    return (self._gridspec, self.num1, self.num2) == (getattr(other, '_gridspec', object()), getattr(other, 'num1', object()), getattr(other, 'num2', object()))",
    "docstring": "Two SubplotSpecs are considered equal if they refer to the same position(s) in the same .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "grouped_gemm_pass",
    "source_code": "def grouped_gemm_pass(graph: torch.fx.Graph):\n    computation_op = mkldnn._linear_pointwise.default\n    from ..mkldnn_lowerings import grouped_gemm_lowering\n    for node in graph.find_nodes(op='call_function', target=computation_op):\n        if not node._erased and isinstance(node.meta.get('val'), torch.Tensor) and (node.meta['val'].device.type == 'cpu'):\n            act = node.args[0]\n            users = list(act.users)\n            if _is_valid_grouped_gemm_fusion(users):\n                with graph.inserting_before(node):\n                    grouped_gemm_node = graph.create_node('call_function', grouped_gemm_lowering, (act, [user.args[1] for user in users], [user.args[2] for user in users]))\n                    grouped_gemm_node.meta['val'] = [user.meta['val'] for user in users]\n                    with graph.inserting_after(grouped_gemm_node):\n                        for gemm_idx, user in enumerate(users):\n                            assert user.target == computation_op\n                            get_item = graph.create_node('call_function', operator.getitem, (grouped_gemm_node, gemm_idx))\n                            user.replace_all_uses_with(get_item)\n                            graph.erase_node(user)\n    return",
    "docstring": "Group GEMM has multi output nodes which is compilicated to define a Pattern. Use below way to connect the pattern to the lowering. TODO: Use MultiOutputPattern, current limitation is the pattern requires fixed number of output nodes. Extend to support Group GEMM for pattern matcher.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\mkldnn_fusion.py",
    "ast_data": "FunctionDef name:grouped_gemm_pass arg:graph arguments arg Assign For Call If BoolOp Call Call Compare Assign Assign Call If Call With Call Assign Call Assign With Call For Call Compare Assign Call Call Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "_undo_trajectory",
    "source_code": "def _undo_trajectory(self):\n    for t in self._traj:\n        self._mask[t] = 0",
    "docstring": "Remove current trajectory from mask",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py",
    "ast_data": "FunctionDef name:_undo_trajectory arg:self arguments arg For Assign"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    return (False, {})",
    "docstring": "Test whether the mouse event occurred in the Tick marks. This function always returns false. It is more useful to test if the axis as a whole contains the mouse rather than the set of tick marks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "blit",
    "source_code": "def blit(self, bbox=None):\n    pass",
    "docstring": "Blit the canvas in bbox (default entire canvas).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:blit arg:self arg:bbox arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "get_cell",
    "source_code": "def get_cell(self, *labels):\n    return BoolGaugeCell(super(BoolGauge, self).get_cell(*labels))",
    "docstring": "Retrieves the cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__del__",
    "source_code": "def __del__(self):\n    self.unregister_callback()",
    "docstring": "Calls unregister_callback() to make sure to finalize outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:__del__ arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "_unnormalized_transform",
    "source_code": "def _unnormalized_transform(self, X):\n    doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False)\n    return doc_topic_distr",
    "docstring": "Transform data X according to fitted model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Document word matrix. Returns ------- doc_topic_distr : ndarray of shape (n_samples, n_components) Document topic distribution for X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:_unnormalized_transform arg:self arg:X arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "def score_samples(self, X):\n    return self.decision_function(X) + self.offset_",
    "docstring": "Raw scoring function of the samples. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- score_samples : ndarray of shape (n_samples,) Returns the (unshifted) scoring function of the samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_classes.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_clear_losses",
    "source_code": "def _clear_losses(self):\n    if not getattr(self, '_self_tracked_trackables', None):\n        self._thread_local._eager_losses = []\n    else:\n        for layer in self._flatten_layers():\n            layer._thread_local._eager_losses = []",
    "docstring": "Used every step in eager to reset losses.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_clear_losses arg:self arguments arg If Call Assign For Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "pack_sequence_as",
    "source_code": "def pack_sequence_as(structure, flat_sequence):\n    return nest_util.pack_sequence_as(nest_util.Modality.DATA, structure, flat_sequence, expand_composites=False)",
    "docstring": "Returns a given flattened sequence packed into a nest. If is a scalar, must be a single-element list; in this case the return value is . Args: structure: tuple or list constructed of scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are considered scalars. flat_sequence: flat sequence to pack. Returns: packed: converted to have the same recursive structure as . Raises: ValueError: If nest and structure have different element counts.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\nest.py",
    "ast_data": "FunctionDef name:pack_sequence_as arg:structure arg:flat_sequence arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__str__",
    "source_code": "def __str__(self):\n    exception_strings = map(repr, self.get_instances())\n    return self.delimiter.join(exception_strings)",
    "docstring": "Render the list of errors, which happened in channel.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\wspbus.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, fallback: Callable[[], Optional[ChoiceCaller]], choices: list[ChoiceCaller], input_nodes: list[Any], context: AHContext, name: str, augment_context: Optional[list[AHOperation]]=None, precondition: Optional[Callable[[AHMetadata, AHContext], bool]]=None) -> None:\n    self.input_nodes = input_nodes\n    self.choicestr2choice: dict[str, ChoiceCaller] = {}\n    for choice in choices:\n        self.choicestr2choice[choice.autoheuristic_id()] = choice\n    choices_str = list(self.choicestr2choice.keys())\n\n    def fallback_str() -> str:\n        fallback_choice = fallback()\n        if fallback_choice is None:\n            return 'unsure'\n        return fallback_choice.autoheuristic_id()\n    super().__init__(fallback_str, choices_str, None, context, name, augment_context, precondition)\n    if torch._inductor.config.collect_autoheuristic(self.name) and self.satisfies_precondition():\n        self.register_global_feedback(input_nodes, choices)",
    "docstring": "The arguments choices, input_nodes and name have to match the ones used in the call to autotune_select_algorithm(), e.g. if the following call is made autotune_select_algorithm(name, choices, input_nodes, layout), the same name, choices and input_nodes have to be used here.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fallback arg:choices arg:input_nodes arg:context arg:name arg:augment_context arg:precondition arguments arg arg arg arg arg arg arg arg Assign For Assign Call Assign Call Call FunctionDef name:fallback_str arguments Assign Call If Compare Return return:yes Return return:yes Call Call Call If BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Saveable",
    "source_code": "class _Saveable(BaseSaverBuilder.SaveableObject):\n\n    def __init__(self, table, name, table_name=None):\n        tensors = table.export()\n        specs = [BaseSaverBuilder.SaveSpec(tensors[0], '', name + '-keys'), BaseSaverBuilder.SaveSpec(tensors[1], '', name + '-values')]\n        self.table_name = table_name or name\n        super(DenseHashTable._Saveable, self).__init__(table, specs, name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        del restored_shapes\n        with ops.name_scope('%s_table_restore' % self.table_name):\n            with ops.colocate_with(self.op.resource_handle):\n                return gen_lookup_ops.lookup_table_import_v2(self.op.resource_handle, restored_tensors[0], restored_tensors[1])",
    "docstring": "SaveableObject implementation for DenseHashTable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:_Saveable FunctionDef name:__init__ arg:self arg:table arg:name arg:table_name arguments arg arg arg arg Assign Call Assign Call Call Assign BoolOp Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg With Call With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_reference_quantized_lstm_module",
    "source_code": "def _get_reference_quantized_lstm_module(observed_lstm: torch.ao.nn.quantizable.LSTM, backend_config: Optional[BackendConfig]=None) -> torch.ao.nn.quantized.LSTM:\n    quantized_lstm = torch.ao.nn.quantized.LSTM(observed_lstm.input_size, observed_lstm.hidden_size, observed_lstm.num_layers, observed_lstm.bias, observed_lstm.batch_first, observed_lstm.dropout, observed_lstm.bidirectional)\n    for i, layer in enumerate(quantized_lstm.layers):\n        cell = copy.deepcopy(observed_lstm.layers.get_submodule(str(i)).layer_fw.cell)\n        cell = convert_to_reference_fx(cell, backend_config=backend_config)\n        assert isinstance(cell, torch.fx.GraphModule)\n        for node in cell.graph.nodes:\n            if node.target == torch.quantize_per_tensor:\n                arg = node.args[0]\n                if arg.target == 'x' or (arg.target == operator.getitem and arg.args[0].target == 'hidden'):\n                    with cell.graph.inserting_before(node):\n                        node.replace_all_uses_with(arg)\n                        cell.graph.erase_node(node)\n            if node.target == 'output':\n                for arg in node.args[0]:\n                    with cell.graph.inserting_before(node):\n                        node.replace_input_with(arg, arg.args[0])\n        cell.graph.eliminate_dead_code()\n        cell.recompile()\n        layer.layer_fw.cell = cell\n    return quantized_lstm",
    "docstring": "Return a created from a with observers or fake quantizes inserted through , e.g. from . This is meant to be used to convert an observed module to a quantized module in the custom module flow. Args: : a observed through : BackendConfig to use to produce the reference quantized model Return: A reference module.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\lstm_utils.py",
    "ast_data": "FunctionDef name:_get_reference_quantized_lstm_module arg:observed_lstm arg:backend_config arguments arg arg Assign Call For Call Assign Call Call Call Assign Call Call For If Compare Assign If BoolOp Compare BoolOp Compare Compare With Call Call Call If Compare For With Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_AddSaveOps",
    "source_code": "def _AddSaveOps(self, filename_tensor, saveables):\n    save = self.save_op(filename_tensor, saveables)\n    return control_flow_ops.with_dependencies([save], filename_tensor)",
    "docstring": "Add ops to save variables that are on the same shard. Args: filename_tensor: String Tensor. saveables: A list of SaveableObject objects. Returns: A tensor with the filename used to save.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:_AddSaveOps arg:self arg:filename_tensor arg:saveables arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_InitializeValues",
    "source_code": "def _InitializeValues(self, values):\n    self._values = set()\n    for x in values:\n        if isinstance(x, tensor_lib.Tensor):\n            self._values.add(x.name)\n        else:\n            raise TypeError(f\"'values' must be a list of Tensors. Received: {type(x)}.\")",
    "docstring": "Makes the values known to this context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_InitializeValues arg:self arg:values arguments arg arg Assign Call For If Call Call Raise Call Call"
  },
  {
    "library": "numpy",
    "name": "check_gcc_function_attribute_with_intrinsics",
    "source_code": "def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code, include):\n    cmd._check_compiler()\n    body = textwrap.dedent('\\n        #include<%s>\\n        int %s %s(void)\\n        {\\n            %s;\\n            return 0;\\n        }\\n\\n        int\\n        main()\\n        {\\n            return 0;\\n        }\\n        ') % (include, attribute, name, code)\n    return cmd.try_compile(body, None, None) != 0",
    "docstring": "Return True if the given function attribute is supported with intrinsics.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\command\\autodist.py",
    "ast_data": "FunctionDef name:check_gcc_function_attribute_with_intrinsics arg:cmd arg:attribute arg:name arg:code arg:include arguments arg arg arg arg arg Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "__reversed__",
    "source_code": "def __reversed__(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.__reversed__, (self,), self)\n    if self.dim() == 0:\n        return self\n    else:\n        return self.flip(0)",
    "docstring": "Reverses the tensor along dimension 0.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:__reversed__ arg:self arguments arg If Call Return return:yes Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "generate_binconstraint_d",
    "source_code": "@register_transformation_rule(BinConstraintD)\ndef generate_binconstraint_d(constraint, counter):\n    if constraint.op == op_precision:\n        if isinstance(constraint.lhs, int):\n            return (BinConstraintD(constraint.lhs, constraint.rhs, op_eq), counter)\n        elif constraint.lhs == Dyn:\n            return (T(), counter)\n    elif constraint.op == op_consistency:\n        return (Disj([BinConstraintD(constraint.lhs, constraint.rhs, op_eq), BinConstraintD(constraint.rhs, Dyn, op_eq), BinConstraintD(constraint.lhs, Dyn, op_eq)]), counter)\n    else:\n        return (constraint, counter)",
    "docstring": "Transform binary constraints for dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_binconstraint_d arg:constraint arg:counter arguments arg arg If Compare If Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clear_weight_quant_obs_node",
    "source_code": "def clear_weight_quant_obs_node(op_node: Node, modules: dict[str, nn.Module]) -> None:\n    weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules)\n    if weight_eq_obs_node is None:\n        return\n    weight_quant_obs_node = weight_eq_obs_node.args[0]\n    if weight_quant_obs_node is None:\n        return\n    assert isinstance(weight_quant_obs_node, Node)\n    weight_quant_obs = modules[str(weight_quant_obs_node.target)]\n    assert isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase)\n    weight_quant_obs.reset_min_max_vals()",
    "docstring": "Given the operation node, we want find the corresponding quantization observer and reset its min/max values",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:clear_weight_quant_obs_node arg:op_node arg:modules arguments arg arg Assign Call If Compare Return return:no Assign If Compare Return return:no Call Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "intersection",
    "source_code": "def intersection(self, other):\n    return self._geomgen(capi.geom_intersection, other)",
    "docstring": "Return a new geometry consisting of the region of intersection of this geometry and the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:intersection arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_validate_where",
    "source_code": "def _validate_where(w):\n    if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):\n        raise TypeError('where must be passed as a string, PyTablesExpr, or list-like of PyTablesExpr')\n    return w",
    "docstring": "Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:_validate_where arg:w arguments arg If BoolOp Call Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "relative_transformation",
    "source_code": "def relative_transformation(trans_01: Tensor, trans_02: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(trans_01)\n    KORNIA_CHECK_IS_TENSOR(trans_02)\n    if not (trans_01.dim() in (2, 3) and trans_01.shape[-2:] == (4, 4)):\n        raise ValueError(f'Input must be a of the shape Nx4x4 or 4x4. Got {trans_01.shape}')\n    if not (trans_02.dim() in (2, 3) and trans_02.shape[-2:] == (4, 4)):\n        raise ValueError(f'Input must be a of the shape Nx4x4 or 4x4. Got {trans_02.shape}')\n    if not trans_01.dim() == trans_02.dim():\n        raise ValueError(f'Input number of dims must match. Got {trans_01.dim()} and {trans_02.dim()}')\n    trans_10 = inverse_transformation(trans_01)\n    trans_12 = compose_transformations(trans_10, trans_02)\n    return trans_12",
    "docstring": "Compute the relative homogeneous transformation from a reference transformation. :math: to destination :math:. The relative transformation is computed as follows: .. math:: T_1^{2} = (T_0^{1})^{-1} \\cdot T_0^{2} Args: trans_01: reference transformation tensor of shape :math: or :math:. trans_02: destination transformation tensor of shape :math: or :math:. Returns: the relative transformation between the transformations with shape :math: or :math:. Example:: >>> trans_01 = torch.eye(4) # 4x4 >>> trans_02 = torch.eye(4) # 4x4 >>> trans_12 = relative_transformation(trans_01, trans_02) # 4x4",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\linalg.py",
    "ast_data": "FunctionDef name:relative_transformation arg:trans_01 arg:trans_02 arguments arg arg Call Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call If Compare Call Call Raise Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_type_proto",
    "source_code": "@classmethod\ndef experimental_type_proto(cls) -> Type[struct_pb2.TensorSpecProto]:\n    return struct_pb2.TensorSpecProto",
    "docstring": "Returns the type of proto associated with TensorSpec serialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, xs, ys, zs, *args, axlim_clip=False, **kwargs):\n    super().__init__([], [], *args, **kwargs)\n    self.set_data_3d(xs, ys, zs)\n    self._axlim_clip = axlim_clip",
    "docstring": "Parameters ---------- xs : array-like The x-data to be plotted. ys : array-like The y-data to be plotted. zs : array-like The z-data to be plotted. *args, **kwargs Additional arguments are passed to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:xs arg:ys arg:zs arguments arg arg arg arg arg arg arg Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_submodule_n_params",
    "source_code": "def _get_submodule_n_params(module: nn.Module, path: str):\n    if '.' in path:\n        path_list = path.split('.')\n        parent_module_path = '.'.join(path_list[:-1])\n        module = module.get_submodule(parent_module_path)\n        path = path_list[-1]\n    return (module, path)",
    "docstring": "Get submodule and the direct path of parameter from the module",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\ddp.py",
    "ast_data": "FunctionDef name:_get_submodule_n_params arg:module arg:path arguments arg arg If Compare Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "members_of",
    "source_code": "def members_of(obj: Any, *, config: Config) -> Sequence[str]:\n    if config.autosummary_ignore_module_all:\n        return dir(obj)\n    else:\n        if (obj___all__ := getall(obj)) is not None:\n            return obj___all__\n        return dir(obj)",
    "docstring": "Get the members of `` setting.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py",
    "ast_data": "FunctionDef name:members_of arg:obj arguments arg arg If Return return:yes Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "indices",
    "source_code": "@property\ndef indices(self):\n    return self._indices",
    "docstring": "The indices of non-zero values in the represented dense tensor. Returns: A 2-D Tensor of int64 with dense_shape , where is the number of non-zero values in the tensor, and is the rank.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:indices arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reduce_grad_no_shard",
    "source_code": "@no_type_check\ndef _reduce_grad_no_shard(state: _FSDPState, handle: FlatParamHandle) -> None:\n    flat_param = handle.flat_param\n    if state._comm_hook is None:\n        _div_if_needed(flat_param.grad, state._gradient_predivide_factor)\n        dist.all_reduce(flat_param.grad, group=state.process_group)\n        _div_if_needed(flat_param.grad, state._gradient_postdivide_factor)\n    else:\n        state._comm_hook(state._comm_hook_state, flat_param.grad)\n    if not handle._keep_low_precision_grads:\n        _cast_grad_to_param_dtype(state, flat_param.grad, flat_param)\n    grad_to_offload = flat_param.grad.data\n    _post_reduce_grad_callback(state, handle, grad_to_offload)",
    "docstring": "For no-shard, this runs gradient reduction (which directly covers any gradient accumulation implicitly) and the post-reduction callback.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_reduce_grad_no_shard arg:state arg:handle arguments arg arg Assign If Compare Call Call Call Call If Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensorarray_set_item",
    "source_code": "def _tf_tensorarray_set_item(target, i, x):\n    return target.write(i, x)",
    "docstring": "Overload of set_item that stages a TensorArray write.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensorarray_set_item arg:target arg:i arg:x arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "summary_scope",
    "source_code": "@tf_export('summary.experimental.summary_scope', v1=[])\n@tf_contextlib.contextmanager\ndef summary_scope(name, default_name='summary', values=None):\n    name = name or default_name\n    current_scope = ops.get_name_scope()\n    tag = current_scope + '/' + name if current_scope else name\n    name = _INVALID_SCOPE_CHARACTERS.sub('', name) or None\n    with ops.name_scope(name, default_name, values, skip_on_eager=False) as scope:\n        yield (tag, scope)",
    "docstring": "Experimental context manager for use when defining a custom summary op. This behaves similarly to , except that it returns a generated summary tag in addition to the scope name. The tag is structurally similar to the scope name - derived from the user-provided name, prefixed with enclosing name scopes if any - but we relax the constraint that it be uniquified, as well as the character set limitation (so the user-provided name can contain characters not legal for scope names; in the scope name these are removed). This makes the summary tag more predictable and consistent for the user. For example, to define a new summary op called : Args: name: string name for the summary. default_name: Optional; if provided, used as default name of the summary. values: Optional; passed as parameter to name_scope. Yields: A tuple as described above.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:summary_scope arg:name arg:default_name arg:values arguments arg arg arg Assign BoolOp Assign Call Assign Assign BoolOp Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_trackable_needs_to_be_saved",
    "source_code": "def _trackable_needs_to_be_saved(obj):\n    if hasattr(obj, '__dict__'):\n        if '_serialize_to_tensors' in obj.__dict__ or '_gather_saveables_for_checkpoint' in obj.__dict__ or '_copy_trackable_to_cpu' in obj.__dict__:\n            return True\n    for t in type(obj).mro():\n        if t is base.Trackable:\n            continue\n        elif '_serialize_to_tensors' in t.__dict__ or '_gather_saveables_for_checkpoint' in t.__dict__ or '_copy_trackable_to_cpu' in t.__dict__:\n            return True\n    return False",
    "docstring": "Returns whether a trackable needs to be saved. Returns a bool to indicate whether obj's class has , , or defined. Args: obj: A Trackable object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:_trackable_needs_to_be_saved arg:obj arguments arg If Call If BoolOp Compare Compare Compare Return return:yes For Call Call If Compare If BoolOp Compare Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_reduce_sum_grad",
    "source_code": "@ops.RegisterGradient('NcclReduce')\ndef _reduce_sum_grad(op, grad):\n    if op.get_attr('reduction') != b'sum':\n        raise LookupError('No gradient defined for NcclAllReduce except for reduction=\"sum\".')\n    _check_device(grad, expected=op.device)\n    with ops.device(op.device):\n        result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)\n    return [result] * len(op.inputs)",
    "docstring": "The gradients for input of . Args: op: The that we are differentiating. grad: Gradient with respect to the output of the op. Returns: The gradient with respect to the input of op. Raises: LookupError: If the reduction attribute of op is not .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:_reduce_sum_grad arg:op arg:grad arguments arg arg If Compare Call Raise Call Call With Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_tnp_to_np_map",
    "source_code": "@functools.lru_cache(maxsize=1)\ndef get_tnp_to_np_map():\n    m = get_np_to_tnp_map()\n    return {v: k for k, v in m.items()}",
    "docstring": "This is just the reverse mapping of get_np_to_tnp_map() - mapping from torch._numpy modules to numpy equivalents.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "FunctionDef name:get_tnp_to_np_map arguments Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "render_flatpage",
    "source_code": "@csrf_protect\ndef render_flatpage(request, f):\n    if f.registration_required and (not request.user.is_authenticated):\n        from django.contrib.auth.views import redirect_to_login\n        return redirect_to_login(request.path)\n    if f.template_name:\n        template = loader.select_template((f.template_name, DEFAULT_TEMPLATE))\n    else:\n        template = loader.get_template(DEFAULT_TEMPLATE)\n    f.title = mark_safe(f.title)\n    f.content = mark_safe(f.content)\n    return HttpResponse(template.render({'flatpage': f}, request))",
    "docstring": "Internal interface to the flat page view.",
    "type": "function",
    "file_path": "django\\django\\contrib\\flatpages\\views.py",
    "ast_data": "FunctionDef name:render_flatpage arg:request arg:f arguments arg arg If BoolOp Return return:yes Call If Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_stream",
    "source_code": "def _get_stream(device: torch.device):\n    global _streams\n    if device.type == 'cpu' or not torch.accelerator.is_available():\n        return None\n    assert torch.accelerator.current_accelerator().type == device.type\n    if _streams is None:\n        _streams = [None] * torch.accelerator.device_count()\n    if _streams[device.index] is None:\n        _streams[device.index] = torch.Stream(device.index)\n    return _streams[device.index]",
    "docstring": "Get a background stream for copying between CPU and target device.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\_functions.py",
    "ast_data": "FunctionDef name:_get_stream arg:device arguments arg If BoolOp Compare Call Return return:no Compare Call If Compare Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "unsharp_mask",
    "source_code": "def unsharp_mask(input: Tensor, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str='reflect') -> Tensor:\n    data_blur: Tensor = gaussian_blur2d(input, kernel_size, sigma, border_type)\n    data_sharpened: Tensor = input + (input - data_blur)\n    return data_sharpened",
    "docstring": "Create an operator that sharpens a tensor by applying operation out = 2 * image - gaussian_blur2d(image). .. image:: _static/img/unsharp_mask.png Args: input: the input tensor with shape :math:. kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B,C,H,W)`. Examples: >>> input = torch.rand(2, 4, 5, 5) >>> output = unsharp_mask(input, (3, 3), (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\unsharp.py",
    "ast_data": "FunctionDef name:unsharp_mask arg:input arg:kernel_size arg:sigma arg:border_type arguments arg arg arg arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "strip_unused_from_files",
    "source_code": "def strip_unused_from_files(input_graph, input_binary, output_graph, output_binary, input_node_names, output_node_names, placeholder_type_enum):\n    if not gfile.Exists(input_graph):\n        print(\"Input graph file '\" + input_graph + \"' does not exist!\")\n        return -1\n    if not output_node_names:\n        print('You need to supply the name of a node to --output_node_names.')\n        return -1\n    input_graph_def = graph_pb2.GraphDef()\n    mode = 'rb' if input_binary else 'r'\n    with gfile.GFile(input_graph, mode) as f:\n        if input_binary:\n            input_graph_def.ParseFromString(f.read())\n        else:\n            text_format.Merge(f.read(), input_graph_def)\n    output_graph_def = strip_unused(input_graph_def, input_node_names.split(','), output_node_names.split(','), placeholder_type_enum)\n    if output_binary:\n        with gfile.GFile(output_graph, 'wb') as f:\n            f.write(output_graph_def.SerializeToString())\n    else:\n        with gfile.GFile(output_graph, 'w') as f:\n            f.write(text_format.MessageToString(output_graph_def))\n    print('%d ops in the final graph.' % len(output_graph_def.node))",
    "docstring": "Removes unused nodes from a graph file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\strip_unused_lib.py",
    "ast_data": "FunctionDef name:strip_unused_from_files arg:input_graph arg:input_binary arg:output_graph arg:output_binary arg:input_node_names arg:output_node_names arg:placeholder_type_enum arguments arg arg arg arg arg arg arg If Call Call Return return:yes If Call Return return:yes Assign Call Assign With Call If Call Call Call Call Assign Call Call Call If With Call Call Call With Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "make_layoutgrids",
    "source_code": "def make_layoutgrids(fig, layoutgrids, rect=(0, 0, 1, 1)):\n    if layoutgrids is None:\n        layoutgrids = dict()\n        layoutgrids['hasgrids'] = False\n    if not hasattr(fig, '_parent'):\n        layoutgrids[fig] = mlayoutgrid.LayoutGrid(parent=rect, name='figlb')\n    else:\n        gs = fig._subplotspec.get_gridspec()\n        layoutgrids = make_layoutgrids_gs(layoutgrids, gs)\n        parentlb = layoutgrids[gs]\n        layoutgrids[fig] = mlayoutgrid.LayoutGrid(parent=parentlb, name='panellb', parent_inner=True, nrows=1, ncols=1, parent_pos=(fig._subplotspec.rowspan, fig._subplotspec.colspan))\n    for sfig in fig.subfigs:\n        layoutgrids = make_layoutgrids(sfig, layoutgrids)\n    for ax in fig._localaxes:\n        gs = ax.get_gridspec()\n        if gs is not None:\n            layoutgrids = make_layoutgrids_gs(layoutgrids, gs)\n    return layoutgrids",
    "docstring": "Make the layoutgrid tree. (Sub)Figures get a layoutgrid so we can have figure margins. Gridspecs that are attached to Axes get a layoutgrid so Axes can have margins.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py",
    "ast_data": "FunctionDef name:make_layoutgrids arg:fig arg:layoutgrids arg:rect arguments arg arg arg If Compare Assign Call Assign If Call Assign Call Assign Call Assign Call Assign Assign Call For Assign Call For Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "forward_event_shape_tensor",
    "source_code": "def forward_event_shape_tensor(self, input_shape, name='forward_event_shape_tensor'):\n    with self._name_scope(name, [input_shape]):\n        input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32, name='input_shape')\n        return self._forward_event_shape_tensor(input_shape)",
    "docstring": "Shape of a single sample from a single batch as an 1D . Args: input_shape: , vector indicating event-portion shape passed into function. name: name to give to the op Returns: forward_event_shape_tensor: , vector indicating event-portion shape after applying .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:forward_event_shape_tensor arg:self arg:input_shape arg:name arguments arg arg arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "PackageUnpickler",
    "source_code": "class PackageUnpickler(pickle._Unpickler):\n\n    def __init__(self, importer: Importer, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._importer = importer\n\n    def find_class(self, module, name):\n        if self.proto < 3 and self.fix_imports:\n            if (module, name) in _compat_pickle.NAME_MAPPING:\n                module, name = _compat_pickle.NAME_MAPPING[module, name]\n            elif module in _compat_pickle.IMPORT_MAPPING:\n                module = _compat_pickle.IMPORT_MAPPING[module]\n        mod = self._importer.import_module(module)\n        return getattr(mod, name)",
    "docstring": "Package-aware unpickler. This behaves the same as a normal unpickler, except it uses to find any global names that it encounters while unpickling.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\_package_unpickler.py",
    "ast_data": "ClassDef name:PackageUnpickler FunctionDef name:__init__ arg:self arg:importer arguments arg arg arg arg Call Call Assign FunctionDef name:find_class arg:self arg:module arg:name arguments arg arg arg If BoolOp Compare If Compare Assign If Compare Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_add_call_warning",
    "source_code": "def _maybe_add_call_warning(self, node, full_name, name):\n    warned = False\n    if isinstance(node.func, ast.Attribute):\n        warned = self._maybe_add_warning(node, '*.' + name)\n    arg_warnings = self._get_applicable_dict('function_arg_warnings', full_name, name)\n    variadic_args = uses_star_args_or_kwargs_in_call(node)\n    for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):\n        present, _ = get_arg_value(node, kwarg, arg) or variadic_args\n        if present:\n            warned = True\n            warning_message = warning.replace('<function name>', full_name or name)\n            template = '%s called with %s argument, requires manual check: %s'\n            if variadic_args:\n                template = '%s called with *args or **kwargs that may include %s, requires manual check: %s'\n            self.add_log(level, node.lineno, node.col_offset, template % (full_name or name, kwarg, warning_message))\n    return warned",
    "docstring": "Print a warning when specific functions are called with selected args. The function _print_warning_for_function matches the full name of the called function, e.g., tf.foo.bar(). This function matches the function name that is called, as long as the function is an attribute. For example, and are matched, but not . Args: node: ast.Call object full_name: The precomputed full name of the callable, if one exists, None otherwise. name: The precomputed name of the callable, if one exists, None otherwise. Returns: Whether an error was recorded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_maybe_add_call_warning arg:self arg:node arg:full_name arg:name arguments arg arg arg arg Assign If Call Assign Call Assign Call Assign Call For Call Call Assign BoolOp Call If Assign Assign Call BoolOp Assign If Assign Call BoolOp Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_variable_call",
    "source_code": "@classmethod\ndef _variable_call(cls, initial_value=None, trainable=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, import_scope=None, constraint=None, synchronization=VariableSynchronization.AUTO, aggregation=VariableAggregation.NONE, shape=None, experimental_enable_variable_lifting=None, **kwargs):\n    if cls is not Variable:\n        return None\n    previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)\n    for _, getter in ops.get_default_graph()._variable_creator_stack:\n        previous_getter = _make_getter(getter, previous_getter)\n    if aggregation is None:\n        aggregation = VariableAggregation.NONE\n    return previous_getter(initial_value=initial_value, trainable=trainable, validate_shape=validate_shape, caching_device=caching_device, name=name, variable_def=variable_def, dtype=dtype, import_scope=import_scope, constraint=constraint, synchronization=synchronization, aggregation=aggregation, shape=shape, experimental_enable_variable_lifting=experimental_enable_variable_lifting, **kwargs)",
    "docstring": "Variable class getter. Useful to force the signature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_variable_call arg:cls arg:initial_value arg:trainable arg:validate_shape arg:caching_device arg:name arg:variable_def arg:dtype arg:import_scope arg:constraint arg:synchronization arg:aggregation arg:shape arg:experimental_enable_variable_lifting arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg If Compare Return return:no Assign arguments arg Call For Call Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_find_consumer_matmuls",
    "source_code": "def _find_consumer_matmuls(node: torch.fx.Node) -> list[_Matmul]:\n    matmuls = []\n    for user in node.users:\n        if user.target == aten.reshape.default:\n            matmuls.extend(_find_reshape_mm_reshape(user))\n        elif user.target == aten.mm.default:\n            matmul = _Matmul.from_match(match=[user])\n            matmuls.append(matmul)\n        elif user.target == aten._scaled_mm.default:\n            matmul = _ScaledMatmul.from_match([user])\n            matmuls.append(matmul)\n    return matmuls",
    "docstring": "Find the matmuls that use as the lhs argument.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:_find_consumer_matmuls arg:node arguments arg Assign For If Compare Call Call If Compare Assign Call Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "strip_tags",
    "source_code": "@keep_lazy_text\ndef strip_tags(value):\n    value = str(value)\n    for long_open_tag in long_open_tag_without_closing_re.finditer(value):\n        if long_open_tag.group().count('<') >= MAX_STRIP_TAGS_DEPTH:\n            raise SuspiciousOperation\n    strip_tags_depth = 0\n    while '<' in value and '>' in value:\n        if strip_tags_depth >= MAX_STRIP_TAGS_DEPTH:\n            raise SuspiciousOperation\n        new_value = _strip_once(value)\n        if value.count('<') == new_value.count('<'):\n            break\n        value = new_value\n        strip_tags_depth += 1\n    return value",
    "docstring": "Return the given HTML with all tags stripped.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:strip_tags arg:value arguments arg Assign Call For Call If Compare Call Call Raise Assign While BoolOp Compare Compare If Compare Raise Assign Call If Compare Call Call Assign Return return:yes"
  },
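A minimal usage sketch for the public helper above, assuming only that Django is installed; `django.utils.html` imports without project settings, and pathologically nested input trips the `SuspiciousOperation` guard visible in the source.

from django.utils.html import strip_tags

print(strip_tags("<p>Hello <b>world</b>!</p>"))  # -> "Hello world!"
# Input nested deeper than MAX_STRIP_TAGS_DEPTH (50 in current releases)
# raises SuspiciousOperation instead of looping indefinitely.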
  {
    "library": "tensorflow",
    "name": "reconstruct_non_debug_graph_def",
    "source_code": "def reconstruct_non_debug_graph_def(debug_graph_def):\n    return DebugGraph(debug_graph_def).non_debug_graph_def",
    "docstring": "Reconstruct original (non-debugger-decorated) partition GraphDef. This method strips the input of the Copy* and Debug*-type nodes inserted by the debugger. The reconstructed partition graph is identical to the original (i.e., non-debugger-decorated) partition graph except in the following respects: 1) The exact names of the runtime-inserted internal nodes may differ. These include _Send, _Recv, _HostSend, _HostRecv, _Retval ops. 2) As a consequence of 1, the nodes that receive input directly from such send- and recv-type ops will have different input names. 3) The parallel_iteration attribute of while-loop Enter ops are set to 1. Args: debug_graph_def: The debugger-decorated , with the debugger-inserted Copy* and Debug* nodes. Returns: The reconstructed stripped of the debugger-inserted nodes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:reconstruct_non_debug_graph_def arg:debug_graph_def arguments arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "reset",
    "source_code": "def reset(self) -> None:\n    self.update(self._default)",
    "docstring": "Update the theme dictionary with seaborn's default values.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "diff",
    "source_code": "@final\ndef diff(self, n: int) -> list[Block]:\n    new_values = algos.diff(self.values.T, n, axis=0).T\n    return [self.make_block(values=new_values)]",
    "docstring": "return block for the diff of the values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:diff arg:self arg:n arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "WriteGraphExecutionTrace",
    "source_code": "def WriteGraphExecutionTrace(self, graph_execution_trace):\n    debug_event = debug_event_pb2.DebugEvent(graph_execution_trace=graph_execution_trace)\n    self._EnsureTimestampAdded(debug_event)\n    _pywrap_debug_events_writer.WriteGraphExecutionTrace(self._dump_root, debug_event)",
    "docstring": "Write a GraphExecutionTrace proto with the writer. Args: graph_execution_trace: A GraphExecutionTrace proto, concerning the value of an intermediate tensor or a list of intermediate tensors that are computed during the graph's execution.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_writer.py",
    "ast_data": "FunctionDef name:WriteGraphExecutionTrace arg:self arg:graph_execution_trace arguments arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_dequantize_opcode_idx",
    "source_code": "def get_dequantize_opcode_idx(model):\n    quant_opcode_idxs = []\n    for idx, opcode in enumerate(model.operatorCodes):\n        builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n        if builtin_code == schema_fb.BuiltinOperator.DEQUANTIZE:\n            quant_opcode_idxs.append(idx)\n    return quant_opcode_idxs",
    "docstring": "Returns the quantize op idx.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_dequantize_opcode_idx arg:model arguments arg Assign For Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "convert_units",
    "source_code": "def convert_units(self, x):\n    if np.issubdtype(np.asarray(x).dtype, np.number):\n        return x\n    elif self.converter is None:\n        return x\n    return self.converter.convert(x, self.units, self)",
    "docstring": "Return a numeric representation of the input data.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:convert_units arg:self arg:x arguments arg arg If Call Call Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_array_api_dispatch",
    "source_code": "def _check_array_api_dispatch(array_api_dispatch):\n    if not array_api_dispatch:\n        return\n    scipy_version = parse_version(scipy.__version__)\n    min_scipy_version = '1.14.0'\n    if scipy_version < parse_version(min_scipy_version):\n        raise ImportError(f'SciPy must be {min_scipy_version} or newer (found {{scipy.__version__}}) to dispatch array using the array API specification')\n    if os.environ.get('SCIPY_ARRAY_API') != '1':\n        raise RuntimeError(\"Scikit-learn array API support was enabled but scipy's own support is not enabled. Please set the SCIPY_ARRAY_API=1 environment variable before importing sklearn or scipy. More details at: https://docs.scipy.org/doc/scipy/dev/api-dev/array_api.html\")",
    "docstring": "Check that array_api_compat is installed and NumPy version is compatible. array_api_compat follows NEP29, which has a higher minimum NumPy version than scikit-learn.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_check_array_api_dispatch arg:array_api_dispatch arguments arg If Return return:no Assign Call Assign If Compare Call Raise Call If Compare Call Raise Call"
  },
  {
    "library": "pandas",
    "name": "_calc_rows",
    "source_code": "def _calc_rows(self, header: int | Sequence[int] | None, index_col: int | Sequence[int] | None, skiprows: Sequence[int] | int | Callable[[int], object] | None, nrows: int | None) -> int | None:\n    if nrows is None:\n        return None\n    if header is None:\n        header_rows = 1\n    elif is_integer(header):\n        header = cast(int, header)\n        header_rows = 1 + header\n    else:\n        header = cast(Sequence, header)\n        header_rows = 1 + header[-1]\n    if is_list_like(header) and index_col is not None:\n        header = cast(Sequence, header)\n        if len(header) > 1:\n            header_rows += 1\n    if skiprows is None:\n        return header_rows + nrows\n    if is_integer(skiprows):\n        skiprows = cast(int, skiprows)\n        return header_rows + nrows + skiprows\n    if is_list_like(skiprows):\n\n        def f(skiprows: Sequence, x: int) -> bool:\n            return x in skiprows\n        skiprows = cast(Sequence, skiprows)\n        return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)\n    if callable(skiprows):\n        return self._check_skiprows_func(skiprows, header_rows + nrows)\n    return None",
    "docstring": "If nrows specified, find the number of rows needed from the file, otherwise return None. Parameters ---------- header : int, list of int, or None See read_excel docstring. index_col : int, str, list of int, or None See read_excel docstring. skiprows : list-like, int, callable, or None See read_excel docstring. nrows : int or None See read_excel docstring. Returns ------- int or None",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:_calc_rows arg:self arg:header arg:index_col arg:skiprows arg:nrows arguments arg arg arg arg arg If Compare Return return:no If Compare Assign If Call Assign Call Assign Assign Call Assign If BoolOp Call Compare Assign Call If Compare Call If Compare Return return:yes If Call Assign Call Return return:yes If Call FunctionDef name:f arg:skiprows arg:x arguments arg arg Return return:yes Compare Assign Call Return return:yes Call Call If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_trackable",
    "source_code": "def convert_to_trackable(obj, parent=None):\n    if isinstance(obj, base.Trackable):\n        return obj\n    obj = data_structures.wrap_or_unwrap(obj)\n    if tensor_util.is_tf_type(obj) and obj.dtype not in (dtypes.variant, dtypes.resource) and (not resource_variable_ops.is_resource_variable(obj)):\n        return saved_model_utils.TrackableConstant(obj, parent)\n    if not isinstance(obj, base.Trackable):\n        raise ValueError(f'Cannot convert {obj} to Trackable.')\n    return obj",
    "docstring": "Converts to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\converter.py",
    "ast_data": "FunctionDef name:convert_to_trackable arg:obj arg:parent arguments arg arg If Call Return return:yes Assign Call If BoolOp Call Compare Call Return return:yes Call If Call Raise Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "generate",
    "source_code": "def generate(self, grant_type, client, user=None, scope=None, expires_in=None):\n    if expires_in is None:\n        expires_in = self.DEFAULT_EXPIRES_IN\n    token_data = self.get_token_data(grant_type, client, expires_in, user, scope)\n    access_token = jwt.encode({'alg': self.alg}, token_data, key=self.secret_key, check=False)\n    token = {'token_type': 'Bearer', 'access_token': to_native(access_token), 'expires_in': expires_in}\n    if scope:\n        token['scope'] = scope\n    return token",
    "docstring": "Generate a bearer token for OAuth 2.0 authorization token endpoint. :param client: the client that making the request. :param grant_type: current requested grant_type. :param user: current authorized user. :param expires_in: if provided, use this value as expires_in. :param scope: current requested scope. :return: Token dict",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\token.py",
    "ast_data": "FunctionDef name:generate arg:self arg:grant_type arg:client arg:user arg:scope arg:expires_in arguments arg arg arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PoissonNLLLoss",
    "source_code": "class PoissonNLLLoss(_Loss):\n    __constants__ = ['log_input', 'full', 'eps', 'reduction']\n    log_input: bool\n    full: bool\n    eps: float\n\n    def __init__(self, log_input: bool=True, full: bool=False, size_average=None, eps: float=1e-08, reduce=None, reduction: str='mean') -> None:\n        super().__init__(size_average, reduce, reduction)\n        self.log_input = log_input\n        self.full = full\n        self.eps = eps\n\n    def forward(self, log_input: Tensor, target: Tensor) -> Tensor:\n        return F.poisson_nll_loss(log_input, target, log_input=self.log_input, full=self.full, eps=self.eps, reduction=self.reduction)",
    "docstring": "Negative log likelihood loss with Poisson distribution of target. The loss can be described as: .. math:: \\text{target} \\sim \\mathrm{Poisson}(\\text{input}) \\text{loss}(\\text{input}, \\text{target}) = \\text{input} - \\text{target} * \\log(\\text{input}) + \\log(\\text{target!}) The last term can be omitted or approximated with Stirling formula. The approximation is used for target values more than 1. For targets less or equal to 1 zeros are added to the loss. Args: log_input (bool, optional): if `\\exp(\\text{input}) - \\text{target}*\\text{input}\\text{input} - \\text{target}*\\log(\\text{input}+\\text{eps})reductionsize_averagereduce\\log(0)log_input = Falsereductionsize_averagereducesize_averagesize_averagereducereduction(*)*(*)reduction(*)`, the same shape as the input.",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\loss.py",
    "ast_data": "ClassDef name:PoissonNLLLoss Assign FunctionDef name:__init__ arg:self arg:log_input arg:full arg:size_average arg:eps arg:reduce arg:reduction arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign FunctionDef name:forward arg:self arg:log_input arg:target arguments arg arg arg Return return:yes Call"
  },
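A short usage sketch for the loss above; by default the input is interpreted as log(rate), matching log_input=True.

import torch
import torch.nn as nn

loss = nn.PoissonNLLLoss()                          # log_input=True by default
log_input = torch.randn(5, 2, requires_grad=True)   # treated as log of the Poisson rate
target = torch.randn(5, 2)
output = loss(log_input, target)
output.backward()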
  {
    "library": "matplotlib",
    "name": "corners",
    "source_code": "@property\ndef corners(self):\n    x0, y0, width, height = self._rect_bbox\n    xc = (x0, x0 + width, x0 + width, x0)\n    yc = (y0, y0, y0 + height, y0 + height)\n    transform = self._get_rotation_transform()\n    coords = transform.transform(np.array([xc, yc]).T).T\n    return (coords[0], coords[1])",
    "docstring": "Corners of rectangle in data coordinates from lower left, moving clockwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:corners arg:self arguments arg Assign Assign Assign Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_read_byte",
    "source_code": "def _read_byte(f):\n    return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])",
    "docstring": "Read a single byte",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_byte arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_convert_tuple",
    "source_code": "def _convert_tuple(value, expected_type, path, context):\n    if not isinstance(value, typing.Sequence):\n        raise TypeError(f'{''.join(path)}: expected tuple, got {type(value).__name__!r}')\n    element_types = type_annotations.get_generic_type_args(expected_type)\n    if len(element_types) == 2 and element_types[1] is Ellipsis:\n        return tuple([_convert_value(v, element_types[0], path + (f'[{i}]',), context) for i, v in enumerate(value)])\n    else:\n        if len(value) != len(element_types):\n            raise TypeError(f'{''.join(path)}: expected tuple with length {len(element_types)}, got {type(value).__name__!r})')\n        return tuple([_convert_value(v, t, path + (f'[{i}]',), context) for i, (v, t) in enumerate(zip(value, element_types))])",
    "docstring": "Converts to a tuple with type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:_convert_tuple arg:value arg:expected_type arg:path arg:context arguments arg arg arg arg If Call Raise Call Call Call Assign Call If BoolOp Compare Call Compare Return return:yes Call Call Call If Compare Call Call Raise Call Call Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "run_fn",
    "source_code": "def run_fn(inputs):\n    fn_result = fn(ctx, inputs)\n    flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n    if flat_last_step_outputs:\n        with ops.control_dependencies([fn_result]):\n            return [array_ops.identity(f) for f in flat_last_step_outputs]\n    else:\n        return fn_result",
    "docstring": "Single step on the TPU device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:run_fn arg:inputs arguments arg Assign Call Assign Call If With Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "read_reals",
    "source_code": "def read_reals(self, dtype='f8'):\n    return self.read_record(dtype)",
    "docstring": "Reads a record of a given type from the file, defaulting to a floating point number (`` in Fortran). Parameters ---------- dtype : dtype, optional Data type specifying the size and endianness of the data. Returns ------- data : ndarray A 1-D array object. See Also -------- read_ints read_record",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_fortran.py",
    "ast_data": "FunctionDef name:read_reals arg:self arg:dtype arguments arg arg Return return:yes Call"
  },
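A round-trip sketch for the method above using the public scipy.io.FortranFile API; the file name is arbitrary.

import numpy as np
from scipy.io import FortranFile

f = FortranFile("example.unf", "w")
f.write_record(np.linspace(0.0, 1.0, 5))  # one unformatted record
f.close()

f = FortranFile("example.unf", "r")
data = f.read_reals(dtype="f8")           # defaults to float64 (real*8)
f.close()
print(data)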
  {
    "library": "pandas",
    "name": "filter",
    "source_code": "def filter(self, items=None, like: str | None=None, regex: str | None=None, axis: Axis | None=None) -> Self:\n    nkw = common.count_not_none(items, like, regex)\n    if nkw > 1:\n        raise TypeError('Keyword arguments `items`, `like`, or `regex` are mutually exclusive')\n    if axis is None:\n        axis = self._info_axis_name\n    labels = self._get_axis(axis)\n    if items is not None:\n        name = self._get_axis_name(axis)\n        items = Index(items).intersection(labels)\n        if len(items) == 0:\n            items = items.astype(labels.dtype)\n        return self.reindex(**{name: items})\n    elif like:\n\n        def f(x) -> bool:\n            assert like is not None\n            return like in ensure_str(x)\n        values = labels.map(f)\n        return self.loc(axis=axis)[values]\n    elif regex:\n\n        def f(x) -> bool:\n            return matcher.search(ensure_str(x)) is not None\n        matcher = re.compile(regex)\n        values = labels.map(f)\n        return self.loc(axis=axis)[values]\n    else:\n        raise TypeError('Must pass either `items`, `like`, or `regex`')",
    "docstring": "Subset the DataFrame or Series according to the specified index labels. For DataFrame, filter rows or columns depending on ``. Examples -------- >>> df = pd.DataFrame( ... np.array(([1, 2, 3], [4, 5, 6])), ... index=[\"mouse\", \"rabbit\"], ... columns=[\"one\", \"two\", \"three\"], ... ) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=[\"one\", \"three\"]) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex=\"e$\", axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like=\"bbi\", axis=0) one two three rabbit 4 5 6",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:filter arg:self arg:items arg:like arg:regex arg:axis arguments arg arg arg arg arg Assign Call If Compare Raise Call If Compare Assign Assign Call If Compare Assign Call Assign Call Call If Compare Call Assign Call Return return:yes Call If FunctionDef name:f arg:x arguments arg Compare Return return:yes Compare Call Assign Call Return return:yes Call If FunctionDef name:f arg:x arguments arg Return return:yes Compare Call Call Assign Call Assign Call Return return:yes Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "solarize_add",
    "source_code": "def solarize_add(min_mag: float, max_mag: float) -> OperationBase:\n    return SolarizeAdd(None, 1.0, magnitude_range=(min_mag, max_mag))",
    "docstring": "Return SolarizeAdd op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:solarize_add arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_make_paths_from_contour_generator",
    "source_code": "def _make_paths_from_contour_generator(self):\n    if self._paths is not None:\n        return self._paths\n    cg = self._contour_generator\n    empty_path = Path(np.empty((0, 2)))\n    vertices_and_codes = map(cg.create_filled_contour, *self._get_lowers_and_uppers()) if self.filled else map(cg.create_contour, self.levels)\n    return [Path(np.concatenate(vs), np.concatenate(cs)) if len(vs) else empty_path for vs, cs in vertices_and_codes]",
    "docstring": "Compute `` using C extension.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_make_paths_from_contour_generator arg:self arguments arg If Compare Return return:yes Assign Assign Call Call Assign Call Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "django",
    "name": "format_json_path_numeric_index",
    "source_code": "def format_json_path_numeric_index(self, num):\n    return '[%s]' % num",
    "docstring": "Hook for backends to customize array indexing in JSON paths.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:format_json_path_numeric_index arg:self arg:num arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_changelist_form",
    "source_code": "def get_changelist_form(self, request, **kwargs):\n    defaults = {'formfield_callback': partial(self.formfield_for_dbfield, request=request), **kwargs}\n    if defaults.get('fields') is None and (not modelform_defines_fields(defaults.get('form'))):\n        defaults['fields'] = forms.ALL_FIELDS\n    return modelform_factory(self.model, **defaults)",
    "docstring": "Return a Form class for use in the Formset on the changelist page.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_changelist_form arg:self arg:request arguments arg arg arg Assign Call If BoolOp Compare Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "RendezvousStoreInfo",
    "source_code": "@dataclass\nclass RendezvousStoreInfo:\n    MASTER_ADDR_KEY: ClassVar[str] = 'MASTER_ADDR'\n    MASTER_PORT_KEY: ClassVar[str] = 'MASTER_PORT'\n    master_addr: str\n    master_port: int\n\n    @staticmethod\n    def build(rank: int, store: Store, local_addr: Optional[str], server_port: Optional[int]=None) -> 'RendezvousStoreInfo':\n        if rank == 0:\n            addr = local_addr or socket.getfqdn()\n            port = server_port or get_free_port()\n            store.set(RendezvousStoreInfo.MASTER_ADDR_KEY, addr.encode(encoding='UTF-8'))\n            store.set(RendezvousStoreInfo.MASTER_PORT_KEY, str(port).encode(encoding='UTF-8'))\n        addr = store.get(RendezvousStoreInfo.MASTER_ADDR_KEY).decode(encoding='UTF-8')\n        port = int(store.get(RendezvousStoreInfo.MASTER_PORT_KEY).decode(encoding='UTF-8'))\n        return RendezvousStoreInfo(master_addr=addr, master_port=port)",
    "docstring": "Store address and port that can be used to bootstrap trainer distributed comms",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousStoreInfo FunctionDef name:build arg:rank arg:store arg:local_addr arg:server_port arguments arg arg arg arg If Compare Assign BoolOp Call Assign BoolOp Call Call Call Call Call Call Assign Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "set_as_test_mirror",
    "source_code": "def set_as_test_mirror(self, primary_settings_dict):\n    self.connection.settings_dict['USER'] = primary_settings_dict['USER']\n    self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']",
    "docstring": "Set this database up to be used in testing as a mirror of a primary database whose settings are given.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\creation.py",
    "ast_data": "FunctionDef name:set_as_test_mirror arg:self arg:primary_settings_dict arguments arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_bad",
    "source_code": "def get_bad(self):\n    return np.array(self._rgba_bad)",
    "docstring": "Get the color for masked values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:get_bad arg:self arguments arg Return return:yes Call"
  },
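A small sketch of the masked-value color round-trip; Colormap.get_bad is available in recent Matplotlib releases (it is the accessor shown above), and matplotlib.colormaps is assumed as the modern registry entry point.

import numpy as np
import matplotlib
import matplotlib.pyplot as plt

cmap = matplotlib.colormaps["viridis"].copy()  # copy before mutating a registered map
cmap.set_bad("red")
print(cmap.get_bad())                          # RGBA array: [1. 0. 0. 1.]

data = np.ma.masked_invalid([[1.0, np.nan], [2.0, 3.0]])
plt.imshow(data, cmap=cmap)                    # the masked cell renders red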
  {
    "library": "django",
    "name": "compress_kml",
    "source_code": "def compress_kml(kml):\n    kmz = BytesIO()\n    with zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED) as zf:\n        zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))\n    kmz.seek(0)\n    return kmz.read()",
    "docstring": "Return compressed KMZ from the given KML string.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\shortcuts.py",
    "ast_data": "FunctionDef name:compress_kml arg:kml arguments arg Assign Call With Call Call Call Call Return return:yes Call"
  },
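The same in-memory zip pattern, extracted into a hypothetical standalone helper; the name compress_to_kmz and the hard-coded charset are illustrative, not Django API.

import zipfile
from io import BytesIO

def compress_to_kmz(kml_text: str, charset: str = "utf-8") -> bytes:
    # A KMZ is just a zip archive whose main entry is named doc.kml.
    buf = BytesIO()
    with zipfile.ZipFile(buf, "a", zipfile.ZIP_DEFLATED) as zf:
        zf.writestr("doc.kml", kml_text.encode(charset))
    buf.seek(0)
    return buf.read()

kmz = compress_to_kmz("<kml>...</kml>")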
  {
    "library": "tensorflow",
    "name": "load_resource",
    "source_code": "@tf_export(v1=['resource_loader.load_resource'])\ndef load_resource(path):\n    with open(get_path_to_datafile(path), 'rb') as f:\n        return f.read()",
    "docstring": "Load the resource at given path, where path is relative to tensorflow/. Args: path: a string resource path relative to tensorflow/. Returns: The contents of that resource. Raises: IOError: If the path is not found, or the resource can't be opened.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\resource_loader.py",
    "ast_data": "FunctionDef name:load_resource arg:path arguments arg With Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "urlretrieve",
    "source_code": "def urlretrieve(url, filename, reporthook=None, data=None):\n\n    def chunk_read(response, chunk_size=8192, reporthook=None):\n        content_type = response.info().get('Content-Length')\n        total_size = -1\n        if content_type is not None:\n            total_size = int(content_type.strip())\n        count = 0\n        while True:\n            chunk = response.read(chunk_size)\n            count += 1\n            if reporthook is not None:\n                reporthook(count, chunk_size, total_size)\n            if chunk:\n                yield chunk\n            else:\n                break\n    response = urlopen(url, data)\n    with open(filename, 'wb') as fd:\n        for chunk in chunk_read(response, reporthook=reporthook):\n            fd.write(chunk)",
    "docstring": "Replacement for for Python 2. Under Python 2, relies on from legacy module, known to have issues with proxy management. Args: url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of the network connection and once after each block read thereafter. The hook will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. data: argument passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:urlretrieve arg:url arg:filename arg:reporthook arg:data arguments arg arg arg arg FunctionDef name:chunk_read arg:response arg:chunk_size arg:reporthook arguments arg arg arg Assign Call Call Assign If Compare Assign Call Call Assign While Assign Call If Compare Call If Assign Call With Call For Call Call"
  },
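The reporthook contract above mirrors the standard library's; a sketch against urllib.request.urlretrieve, with a placeholder URL.

from urllib.request import urlretrieve

def progress(count, block_size, total_size):
    # total_size is -1 when the server sends no Content-Length header.
    if total_size > 0:
        pct = min(100, 100 * count * block_size // total_size)
        print(f"\rdownloaded ~{pct}%", end="")

urlretrieve("https://example.com/file.bin", "file.bin", reporthook=progress)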
  {
    "library": "pandas",
    "name": "right",
    "source_code": "@cache_readonly\ndef right(self) -> Index:\n    return Index(self._data.right, copy=False)",
    "docstring": "Return right bounds of the intervals in the IntervalIndex. The right bounds of each interval in the IntervalIndex are returned as an Index. The datatype of the right bounds is the same as the datatype of the endpoints of the intervals. Returns ------- Index An Index containing the right bounds of the intervals. See Also -------- IntervalIndex.left : Return the left bounds of the intervals in the IntervalIndex. IntervalIndex.mid : Return the mid-point of the intervals in the IntervalIndex. IntervalIndex.length : Return the length of the intervals in the IntervalIndex. Examples -------- >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6], closed=\"right\") >>> iv_idx.right Index([4, 5, 6], dtype='int64') >>> iv_idx = pd.IntervalIndex.from_tuples( ... [(1, 4), (2, 5), (3, 6)], closed=\"left\" ... ) >>> iv_idx.right Index([4, 5, 6], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:right arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "from_matrix",
    "source_code": "@classmethod\ndef from_matrix(cls, matrix: Tensor, frame_src: str | None=None, frame_dst: str | None=None) -> NamedPose | None:\n    check_matrix_shape(matrix, matrix_type='RT')\n    dim = matrix.shape[-1]\n    if dim == 3:\n        return cls(Se2.from_matrix(matrix), frame_src, frame_dst)\n    elif dim == 4:\n        return cls(Se3.from_matrix(matrix), frame_src, frame_dst)\n    return None",
    "docstring": "Construct NamedPose from a matrix. Args: matrix: Matrix representation of the pose. frame_src: Name of the source frame. frame_dst: Name of the destination frame. Returns: NamedPose constructed from a matrix. Example: >>> b_from_a_matrix = Se3.identity().matrix() >>> b_from_a = NamedPose.from_matrix(b_from_a_matrix, frame_src=\"frame_a\", frame_dst=\"frame_b\") >>> b_from_a NamedPose(dst_from_src=rotation: Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True) translation: Parameter containing: tensor([0., 0., 0.], requires_grad=True), frame_src: frame_a -> frame_dst: frame_b)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:from_matrix arg:cls arg:matrix arg:frame_src arg:frame_dst arguments arg arg arg arg Call Assign If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "tpu_hardware_feature",
    "source_code": "@property\ndef tpu_hardware_feature(self):\n    if self._tpu_topology is None:\n        return self._tpu_topology\n    return self._tpu_topology.tpu_hardware_feature",
    "docstring": "Returns the tpu topology info stored.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:tpu_hardware_feature arg:self arguments arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_default_qconfig_mapping",
    "source_code": "def get_default_qconfig_mapping(backend='x86', version=0) -> QConfigMapping:\n    return _get_default_qconfig_mapping(False, backend, version)",
    "docstring": "Return the default QConfigMapping for post training quantization. Args: * `` (int) : the version for the default qconfig mapping",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:get_default_qconfig_mapping arg:backend arg:version arguments arg arg Return return:yes Call"
  },
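A minimal call sketch for the function above; QConfigMapping exposing the resolved global qconfig is assumed from the public torch.ao.quantization API.

from torch.ao.quantization import get_default_qconfig_mapping

qconfig_mapping = get_default_qconfig_mapping("x86")  # defaults shown in the signature
print(qconfig_mapping.global_qconfig)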
  {
    "library": "tensorflow",
    "name": "_configure_embeddings",
    "source_code": "def _configure_embeddings(self):\n    from google.protobuf import text_format\n    from tensorflow.python.keras.layers import embeddings\n    from tensorflow.python.keras.protobuf import projector_config_pb2\n    config = projector_config_pb2.ProjectorConfig()\n    for layer in self.model.layers:\n        if isinstance(layer, embeddings.Embedding):\n            embedding = config.embeddings.add()\n            name = 'layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE'\n            embedding.tensor_name = name\n            if self.embeddings_metadata is not None:\n                if isinstance(self.embeddings_metadata, str):\n                    embedding.metadata_path = self.embeddings_metadata\n                elif layer.name in self.embeddings_metadata.keys():\n                    embedding.metadata_path = self.embeddings_metadata.pop(layer.name)\n    if self.embeddings_metadata and (not isinstance(self.embeddings_metadata, str)):\n        raise ValueError('Unrecognized `Embedding` layer names passed to `keras.callbacks.TensorBoard` `embeddings_metadata` argument: ' + str(self.embeddings_metadata.keys()))\n    config_pbtxt = text_format.MessageToString(config)\n    path = os.path.join(self._log_write_dir, 'projector_config.pbtxt')\n    with gfile.Open(path, 'w') as f:\n        f.write(config_pbtxt)",
    "docstring": "Configure the Projector for embeddings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_configure_embeddings arg:self arguments arg Assign Call For If Call Assign Call Assign Assign If Compare If Call Assign If Compare Call Assign Call If BoolOp Call Raise Call Call Call Assign Call Assign Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_capture_as_const",
    "source_code": "def _capture_as_const(self, name) -> Optional[tensor_lib.Tensor]:\n    with control_dependencies(None):\n        constant_value = tensor_util.constant_value(self)\n        if constant_value is None:\n            return None\n        const_tensor = _create_graph_constant(constant_value, dtype=self.dtype, shape=self.shape, name=name, verify_shape=False, allow_broadcast=True)\n    return const_tensor",
    "docstring": "Capture the EagerTensor to a graph constant tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_capture_as_const arg:self arg:name arguments arg arg With Call Assign Call If Compare Return return:no Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for i in range(len(self)):\n        yield self[i]",
    "docstring": "Iterate over each Geometry in the Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\collections.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_computed_buffer_name",
    "source_code": "def get_computed_buffer_name(self) -> Optional[str]:\n    if self.name is not None:\n        return self.name\n    if hasattr(self.data, 'name'):\n        return self.data.name\n    return None",
    "docstring": "Returns self.name if it exists, otherwise returns the name of the data node if that exists. If neither exist, returns None.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:get_computed_buffer_name arg:self arguments arg If Compare Return return:yes If Call Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "first_group",
    "source_code": "def first_group(self, getter=None):\n    if self.first:\n        return True\n    return self._compare_group(self.item, self.previous, getter)",
    "docstring": "Returns true if this item is the start of a new group, where groups mean that some attribute has changed. The getter can be None (the item itself changes), an attribute name like ``, a function, or a dict key or list index.",
    "type": "method",
    "file_path": "scipy\\scipy\\_build_utils\\tempita\\_looper.py",
    "ast_data": "FunctionDef name:first_group arg:self arg:getter arguments arg arg If Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    with config_context(assume_finite=True):\n        return pairwise_distances_argmin(X, self.cluster_centers_)",
    "docstring": "Predict the closest cluster each sample in X belongs to. Parameters ---------- X : array-like of shape (n_samples, n_features) New data to predict. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_mean_shift.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call With Call Return return:yes Call"
  },
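A fit-then-predict sketch for the method above, using the canonical toy data from the scikit-learn docs.

import numpy as np
from sklearn.cluster import MeanShift

X = np.array([[1, 1], [2, 1], [1, 0], [8, 7], [8, 8], [7, 7]])
ms = MeanShift(bandwidth=2).fit(X)
print(ms.predict([[0, 0], [8, 8]]))  # nearest fitted cluster center per sample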
  {
    "library": "authlib",
    "name": "get_jwks",
    "source_code": "def get_jwks(self):\n    raise NotImplementedError()",
    "docstring": "Return the JWKs that will be used to check the JWT access token signature. Developers MUST re-implement this method. Typically the JWKs are statically stored in the resource server configuration, or dynamically downloaded and cached using :ref::: def get_jwks(self): if \"jwks\" in cache: return cache.get(\"jwks\") server_metadata = get_server_metadata(self.issuer) jwks_uri = server_metadata.get(\"jwks_uri\") cache[\"jwks\"] = requests.get(jwks_uri).json() return cache[\"jwks\"]",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token_validator.py",
    "ast_data": "FunctionDef name:get_jwks arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, default_name=None, values=None) -> None:\n    if not (default_name is None or isinstance(default_name, str)):\n        raise TypeError('`default_name` type (%s) is not a string type. You likely meant to pass this into the `values` kwarg.' % type(default_name))\n    self._name = default_name if name is None else name\n    self._default_name = default_name\n    self._values = values",
    "docstring": "Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the argument is . values: The list of arguments that are passed to the op function. Raises: TypeError: if is passed in but not a string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:default_name arg:values arguments arg arg arg arg If BoolOp Compare Call Raise Call Call Assign Compare Assign Assign"
  },
  {
    "library": "pandas",
    "name": "construct_from_string",
    "source_code": "@classmethod\ndef construct_from_string(cls, string: str_type) -> CategoricalDtype:\n    if not isinstance(string, str):\n        raise TypeError(f\"'construct_from_string' expects a string, got {type(string)}\")\n    if string != cls.name:\n        raise TypeError(f\"Cannot construct a 'CategoricalDtype' from '{string}'\")\n    return cls(ordered=None)",
    "docstring": "Construct a CategoricalDtype from a string. Parameters ---------- string : str Must be the string \"category\" in order to be successfully constructed. Returns ------- CategoricalDtype Instance of the dtype. Raises ------ TypeError If a CategoricalDtype cannot be constructed from the input.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_from_string arg:cls arg:string arguments arg arg If Call Raise Call Call If Compare Raise Call Return return:yes Call"
  },
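Only the exact string "category" constructs the dtype; anything else raises TypeError, as a quick sketch shows.

import pandas as pd

dtype = pd.CategoricalDtype.construct_from_string("category")
print(dtype)  # category

try:
    pd.CategoricalDtype.construct_from_string("category64")
except TypeError as exc:
    print(exc)  # Cannot construct a 'CategoricalDtype' from 'category64'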
  {
    "library": "scipy",
    "name": "sf",
    "source_code": "def sf(self, k, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    k, loc = map(asarray, (k, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    k = asarray(k - loc)\n    cond0 = self._argcheck(*args)\n    cond1 = (k >= _a) & (k < _b)\n    cond2 = ((k < _a) | np.isneginf(k)) & cond0\n    cond = cond0 & cond1 & np.isfinite(k)\n    output = zeros(shape(cond), 'd')\n    place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n    place(output, cond2, 1.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(k,) + args)\n        place(output, cond, np.clip(self._sf(*goodargs), 0, 1))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Survival function (1 - ) at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- sf : array_like Survival function evaluated at k.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:sf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Assign Compare Call Assign Call Assign Call Call Call Call Call If Call Assign Call Call Call Call If Compare Return return:yes Return return:yes"
  },
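A numeric check of the identity the method implements, sf(k) = 1 - cdf(k), on a binomial distribution.

from scipy import stats

k, n, p = 3, 10, 0.5
print(stats.binom.sf(k, n, p))       # 0.828125
print(1 - stats.binom.cdf(k, n, p))  # 0.828125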
  {
    "library": "tensorflow",
    "name": "_policy_equivalent_to_dtype",
    "source_code": "def _policy_equivalent_to_dtype(policy):\n    return type(policy) == Policy and list(policy.get_config().keys()) == ['name'] and (policy.name == '_infer' or _is_convertible_to_dtype(policy.name))",
    "docstring": "Returns True if the Policy is equivalent to a single dtype. A policy is equivalent to a single dtype if the policy's compute and variable dtypes are the same and the policy's type is Policy and not a subclass of Policy (such as PolicyV1). The \"_infer\" policy is considered equivalent to a single dtype. Args: policy: A Policy. Returns: True, if the policy is equivalent to a single dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:_policy_equivalent_to_dtype arg:policy arguments arg Return return:yes BoolOp Compare Call Compare Call Call Call BoolOp Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "option_image_nocomposite",
    "source_code": "def option_image_nocomposite(self):\n    return False",
    "docstring": "Return whether image composition by Matplotlib should be skipped. Raster backends should usually return False (letting the C-level rasterizer take care of image composition); vector backends should usually return ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:option_image_nocomposite arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "mask_pts_at_padded_regions",
    "source_code": "@torch.no_grad()\ndef mask_pts_at_padded_regions(grid_pt: Tensor, mask: Tensor) -> Tensor:\n    n, h, w = mask.shape\n    mask = mask.reshape(n, h * w).unsqueeze(-1).repeat(1, 1, 2)\n    grid_pt[~mask.bool()] = 0\n    return grid_pt",
    "docstring": "For megadepth dataset, zero-padding exists in images.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\loftr\\utils\\supervision.py",
    "ast_data": "FunctionDef name:mask_pts_at_padded_regions arg:grid_pt arg:mask arguments arg arg Assign Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_type_analyze_hook",
    "source_code": "def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None:\n    if fullname in _PRECISION_DICT:\n        return _hook\n    return None",
    "docstring": "Set the precision of platform-specific subclasses. For example: , and .",
    "type": "method",
    "file_path": "numpy\\numpy\\typing\\mypy_plugin.py",
    "ast_data": "FunctionDef name:get_type_analyze_hook arg:self arg:fullname arguments arg arg If Compare Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "_unwrap_setitem_indexer",
    "source_code": "def _unwrap_setitem_indexer(self, indexer):\n    if isinstance(indexer, tuple) and len(indexer) == 2:\n        if all((isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer)):\n            first, second = indexer\n            if not (second.size == 1 and (second == 0).all() and (first.shape[1] == 1)):\n                raise NotImplementedError('This should not be reached. Please report a bug at github.com/pandas-dev/pandas/')\n            indexer = first[:, 0]\n        elif lib.is_integer(indexer[1]) and indexer[1] == 0:\n            indexer = indexer[0]\n        elif com.is_null_slice(indexer[1]):\n            indexer = indexer[0]\n        elif is_list_like(indexer[1]) and indexer[1][0] == 0:\n            indexer = indexer[0]\n        else:\n            raise NotImplementedError('This should not be reached. Please report a bug at github.com/pandas-dev/pandas/')\n    return indexer",
    "docstring": "Adapt a 2D-indexer to our 1D values. This is intended for 'setitem', not 'iget' or '_slice'.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:_unwrap_setitem_indexer arg:self arg:indexer arguments arg arg If BoolOp Call Compare Call If Call BoolOp Call Compare Assign If BoolOp Compare Call Compare Compare Raise Call Assign If BoolOp Call Compare Assign If Call Assign If BoolOp Call Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_offset",
    "source_code": "def set_offset(self, xy):\n    self._offset = xy\n    self.offset_transform.clear()\n    self.offset_transform.translate(xy[0], xy[1])\n    self.stale = True",
    "docstring": "Set the offset of the container. Parameters ---------- xy : (float, float) The (x, y) coordinates of the offset in display units.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:set_offset arg:self arg:xy arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "encode",
    "source_code": "@abc.abstractmethod\ndef encode(self, spec, value, minimum_rank=0):\n    raise NotImplementedError(f'{type(self).__name__}.encode')",
    "docstring": "Encodes as a nest of batchable or . Args: spec: The TypeSpec of the value to encode. value: A value compatible with . minimum_rank: The minimum rank for the returned Tensors, CompositeTensors, and ExtensionType values. This can be used to ensure that the encoded values can be unbatched this number of times. If , then must be compatible for all values returned by . Returns: A nest (as defined by ) of s, batchable s, or s. Stacking, unstacking, or concatenating these encoded values and then decoding the result must be equivalent to stacking, unstacking, or concatenating the original values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:encode arg:self arg:spec arg:value arg:minimum_rank arguments arg arg arg arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_tensor",
    "source_code": "def convert_to_tensor(value, dtype=None, dtype_hint=None):\n    if dtype is None and isinstance(value, int) and (value >= 2 ** 63):\n        dtype = dtypes.uint64\n    elif dtype is None and dtype_hint is None and isinstance(value, float):\n        dtype = np_dtypes.default_float_type()\n    return tensor_conversion.convert_to_tensor_v2_with_dispatch(value, dtype=dtype, dtype_hint=dtype_hint)",
    "docstring": "Wrapper over . Args: value: value to convert dtype: (optional) the type we would like it to be converted to. dtype_hint: (optional) soft preference for the type we would like it to be converted to. will attempt to convert value to this type first, but will not fail if conversion is not possible falling back to inferring the type instead. Returns: Value converted to tf.Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_arrays.py",
    "ast_data": "FunctionDef name:convert_to_tensor arg:value arg:dtype arg:dtype_hint arguments arg arg arg If BoolOp Compare Call Compare Assign If BoolOp Compare Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_type_name",
    "source_code": "def _type_name(x):\n    if isinstance(x, dict):\n        key_types = set((_type_name(key) for key in x.keys()))\n        val_types = set((_type_name(key) for key in x.values()))\n        return '({} containing {} keys and {} values)'.format(type(x), key_types, val_types)\n    if isinstance(x, (list, tuple)):\n        types = set((_type_name(val) for val in x))\n        return '({} containing values of types {})'.format(type(x), types)\n    return str(type(x))",
    "docstring": "Generates a description of the type of an object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_type_name arg:x arguments arg If Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call If Call Assign Call Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_error_intro",
    "source_code": "def get_error_intro(tf_error):\n    if hasattr(tf_error, 'op') and hasattr(tf_error.op, 'name'):\n        op_name = tf_error.op.name\n    else:\n        op_name = None\n    intro_lines = ['--------------------------------------', RL('!!! An error occurred during the run !!!', 'blink'), '']\n    out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)\n    if op_name is not None:\n        out.extend(debugger_cli_common.RichTextLines(['You may use the following commands to debug:']))\n        out.extend(_recommend_command('ni -a -d -t %s' % op_name, 'Inspect information about the failing op.', create_link=True))\n        out.extend(_recommend_command('li -r %s' % op_name, 'List inputs to the failing op, recursively.', create_link=True))\n        out.extend(_recommend_command('lt', 'List all tensors dumped during the failing run() call.', create_link=True))\n    else:\n        out.extend(debugger_cli_common.RichTextLines(['WARNING: Cannot determine the name of the op that caused the error.']))\n    more_lines = ['', 'Op name:    %s' % op_name, 'Error type: ' + str(type(tf_error)), '', 'Details:', str(tf_error), '', '--------------------------------------', '']\n    out.extend(debugger_cli_common.RichTextLines(more_lines))\n    return out",
    "docstring": "Generate formatted intro for TensorFlow run-time error. Args: tf_error: (errors.OpError) TensorFlow run-time error object. Returns: (RichTextLines) Formatted intro message about the run-time OpError, with sample commands for debugging.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_shared.py",
    "ast_data": "FunctionDef name:get_error_intro arg:tf_error arguments arg If BoolOp Call Call Assign Assign Assign Call Assign Call If Compare Call Call Call Call Call Call Call Call Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TaskSpec",
    "source_code": "@dataclasses.dataclass(init=True, repr=False, eq=True, frozen=True)\nclass TaskSpec:\n    stmt: str\n    setup: str\n    global_setup: str = ''\n    label: Optional[str] = None\n    sub_label: Optional[str] = None\n    description: Optional[str] = None\n    env: Optional[str] = None\n    num_threads: int = 1\n\n    @property\n    def title(self) -> str:\n        if self.label is not None:\n            return self.label + (f': {self.sub_label}' if self.sub_label else '')\n        elif '\\n' not in self.stmt:\n            return self.stmt + (f': {self.sub_label}' if self.sub_label else '')\n        return f'stmt:{(f' ({self.sub_label})' if self.sub_label else '')}\\n{textwrap.indent(self.stmt, '  ')}'\n\n    def setup_str(self) -> str:\n        return '' if self.setup == 'pass' or not self.setup else f'setup:\\n{textwrap.indent(self.setup, '  ')}' if '\\n' in self.setup else f'setup: {self.setup}'\n\n    def summarize(self) -> str:\n        sections = [self.title, self.description or '', self.setup_str()]\n        return '\\n'.join([f'{i}\\n' if '\\n' in i else i for i in sections if i])",
    "docstring": "Container for information used to define a Timer. (except globals)",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\common.py",
    "ast_data": "ClassDef name:TaskSpec FunctionDef name:title arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes Call FunctionDef name:setup_str arg:self arguments arg Return return:yes BoolOp Compare Compare Call FunctionDef name:summarize arg:self arguments arg Assign BoolOp Call Return return:yes Call Compare Call"
  },
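A hedged usage sketch: constructing a `TaskSpec` by hand (normally `torch.utils.benchmark.Timer` builds one internally) and printing its summary. Field names follow the dataclass above.

```python
from torch.utils.benchmark.utils.common import TaskSpec

spec = TaskSpec(
    stmt="y = x + 1",
    setup="x = torch.ones(128)",
    label="elementwise add",
    sub_label="float32",
)
print(spec.title)        # "elementwise add: float32"
print(spec.summarize())  # title plus the single-line setup
```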
  {
    "library": "scipy",
    "name": "cdf",
    "source_code": "def cdf(self, k, n, m):\n    self._recalc(n, m)\n    ind = np.ceil(k - self.astart).astype(int)\n    return self.freqs[:ind + 1].sum() / self.total",
    "docstring": "Cumulative distribution function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:k arg:n arg:m arguments arg arg arg arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "nlevels",
    "source_code": "@property\ndef nlevels(self) -> int:\n    return 1",
    "docstring": "Number of levels.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:nlevels arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "NcclAllReduce",
    "source_code": "@tf_export('distribute.NcclAllReduce')\nclass NcclAllReduce(AllReduceCrossDeviceOps):\n\n    def __init__(self, num_packs=1):\n        if num_packs < 0:\n            raise ValueError('NCCL all-reduce requires num_packs >= 0, but {} is specified'.format(num_packs))\n        super(NcclAllReduce, self).__init__(all_reduce_alg='nccl', num_packs=num_packs)",
    "docstring": "NCCL all-reduce implementation of CrossDeviceOps. It uses Nvidia NCCL for all-reduce. For the batch API, tensors will be repacked or aggregated for more efficient cross-device transportation. For reduces that are not all-reduce, it falls back to . Here is how you can use in :",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "ClassDef name:NcclAllReduce FunctionDef name:__init__ arg:self arg:num_packs arguments arg arg If Compare Raise Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "all_strings",
    "source_code": "def all_strings(lst):\n    return all((is_string(item) for item in lst))",
    "docstring": "Return True if all items in lst are string objects.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:all_strings arg:lst arguments arg Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "__getstate__",
    "source_code": "def __getstate__(self) -> dict[str, Any]:\n    __dict__ = {key: value for key, value in self.__dict__.items() if not key.startswith('_') and is_serializable(value)}\n    __dict__['_options'] = _options = {}\n    for name, opt in self._options.items():\n        if not isinstance(opt, _Opt) and isinstance(opt, tuple) and (len(opt) <= 3):\n            self._options[name] = opt = _Opt(*opt)\n        real_value = getattr(self, name)\n        if not is_serializable(real_value):\n            if opt.rebuild:\n                logger.warning(__('cannot cache unpickleable configuration value: %r (because it contains a function, class, or module object)'), name, type='config', subtype='cache', once=True)\n            real_value = None\n        _options[name] = (real_value, opt.rebuild)\n    return __dict__",
    "docstring": "Obtains serializable data for pickling.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Call BoolOp Call Call Assign For Call If BoolOp Call Call Compare Call Assign Call Assign Call If Call If Call Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_read",
    "source_code": "def validate_read(self, columns, where) -> None:\n    if columns is not None:\n        raise TypeError('cannot pass a column specification when reading a Fixed format store. this store must be selected in its entirety')\n    if where is not None:\n        raise TypeError('cannot pass a where specification when reading from a Fixed format store. this store must be selected in its entirety')",
    "docstring": "raise if any keywords are passed which are not-None",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_read arg:self arg:columns arg:where arguments arg arg arg If Compare Raise Call If Compare Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X_y",
    "source_code": "def _check_X_y(self, X, y=None, should_be_fitted=True):\n    if should_be_fitted:\n        check_is_fitted(self)\n    if self.check_X is not None:\n        params = {} if self.check_X_params is None else self.check_X_params\n        checked_X = self.check_X(X, **params)\n        if isinstance(checked_X, (bool, np.bool_)):\n            assert checked_X\n        else:\n            X = checked_X\n    if y is not None and self.check_y is not None:\n        params = {} if self.check_y_params is None else self.check_y_params\n        checked_y = self.check_y(y, **params)\n        if isinstance(checked_y, (bool, np.bool_)):\n            assert checked_y\n        else:\n            y = checked_y\n    return (X, y)",
    "docstring": "Validate X and y and make extra check. Parameters ---------- X : array-like of shape (n_samples, n_features) The data set. is checked only if is not (default is None). y : array-like of shape (n_samples), default=None The corresponding target, by default . is checked only if is not (default is None). should_be_fitted : bool, default=True Whether or not the classifier should be already fitted. By default True. Returns ------- X, y",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_mocking.py",
    "ast_data": "FunctionDef name:_check_X_y arg:self arg:X arg:y arg:should_be_fitted arguments arg arg arg arg If Call If Compare Assign Compare Assign Call If Call Assign If BoolOp Compare Compare Assign Compare Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "@abc.abstractmethod\n@doc_controls.for_subclass_implementers\ndef call(self, y_true, y_pred):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Invokes the instance. Args: y_true: Ground truth values. shape = , except sparse loss functions such as sparse categorical crossentropy where shape = y_pred: The predicted values. shape = Returns: Loss values with the shape .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:call arg:self arg:y_true arg:y_pred arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "reduce_add_coalesced",
    "source_code": "def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):\n    dense_tensors: list[list] = [[] for _ in inputs]\n    output = []\n    ref_order = []\n    for tensor_at_gpus in zip(*inputs):\n        if all((t.is_sparse for t in tensor_at_gpus)):\n            result = reduce_add(tensor_at_gpus, destination)\n            output.append(result)\n            ref_order.append(tensor_at_gpus[0])\n        else:\n            for coll, t in zip(dense_tensors, tensor_at_gpus):\n                coll.append(t.to_dense() if t.is_sparse else t)\n            ref_order.append(dense_tensors[0][-1])\n    itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]\n    for chunks in zip(*itrs):\n        flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks]\n        flat_result = reduce_add(flat_tensors, destination)\n        for t in _unflatten_dense_tensors(flat_result, chunks[0]):\n            output.append(t.data)\n    return tuple(_reorder_tensors_as(output, ref_order))",
    "docstring": "Sum tensors from multiple GPUs. Small tensors are first coalesced into a buffer to reduce the number of synchronizations. Args: inputs (Iterable[Iterable[Tensor]]): iterable of iterables that contain tensors from a single device. destination (int, optional): a device on which the output will be placed (default: current device). buffer_size (int): maximum size of the buffer used for coalescing Returns: A tuple of tensors containing an elementwise sum of each group of inputs, placed on the `` device.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\comm.py",
    "ast_data": "FunctionDef name:reduce_add_coalesced arg:inputs arg:destination arg:buffer_size arguments arg arg arg Assign Assign For Call If Call Assign Call Call Call For Call Call Call Call Assign Call For Call Assign Call Assign Call For Call Call Return return:yes Call Call"
  },
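A hedged sketch of the public entry point (`torch.cuda.comm` re-exports this function); it assumes at least two CUDA devices are available:

```python
import torch
from torch.cuda import comm

# Sum matching tensors across two GPUs; small tensors get coalesced in transit.
if torch.cuda.device_count() >= 2:
    per_device = [
        [torch.ones(3, device="cuda:0"), torch.ones(2, device="cuda:0")],
        [torch.full((3,), 2.0, device="cuda:1"), torch.full((2,), 2.0, device="cuda:1")],
    ]
    summed = comm.reduce_add_coalesced(per_device, destination=0)
    print([t.tolist() for t in summed])  # [[3.0, 3.0, 3.0], [3.0, 3.0]]
```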
  {
    "library": "tensorflow",
    "name": "get_variables_path",
    "source_code": "def get_variables_path(export_dir):\n    return file_io.join(compat.as_text(get_variables_dir(export_dir)), compat.as_text(constants.VARIABLES_FILENAME))",
    "docstring": "Return the variables path, used as the prefix for checkpoint files.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py",
    "ast_data": "FunctionDef name:get_variables_path arg:export_dir arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "def join(self):\n    c_api.TF_ServerJoin(self._server)",
    "docstring": "Blocks until the server has shut down. This method currently blocks forever. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while joining the TensorFlow server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:join arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "as_datetime",
    "source_code": "def as_datetime(self):\n    if not self.is_set:\n        return None\n    yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]\n    status = capi.get_field_as_datetime(self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd), byref(hh), byref(mn), byref(ss), byref(tz))\n    if status:\n        return (yy, mm, dd, hh, mn, ss, tz)\n    else:\n        raise GDALException('Unable to retrieve date & time information from the field.')",
    "docstring": "Retrieve the Field's value as a tuple of date & time components.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:as_datetime arg:self arguments arg If Return return:no Assign Call Call Assign Call Call Call Call Call Call Call Call If Return return:yes Raise Call"
  },
  {
    "library": "pandas",
    "name": "rename",
    "source_code": "def rename(self, index: Renamer | Hashable | None=None, *, axis: Axis | None=None, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False, level: Level | None=None, errors: IgnoreRaise='ignore') -> Series | None:\n    self._check_copy_deprecation(copy)\n    if axis is not None:\n        axis = self._get_axis_number(axis)\n    if callable(index) or is_dict_like(index):\n        return super()._rename(index, inplace=inplace, level=level, errors=errors)\n    else:\n        return self._set_name(index, inplace=inplace)",
    "docstring": "Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change `user guide copyCopy-on-Write copycopycopyKeyErrordict-like mapperindex` and index is not a dict or callable else None. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename(\"my_name\") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x**2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:rename arg:self arg:index arguments arg arg arg arg arg arg arg Call If Compare Assign Call If BoolOp Call Call Return return:yes Call Call Return return:yes Call"
  },
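The docstring's three calling conventions, runnable as-is:

```python
import pandas as pd

s = pd.Series([1, 2, 3])
s = s.rename("my_name")         # scalar: changes Series.name only
s = s.rename(lambda x: x ** 2)  # callable: relabels the index to 0, 1, 4
s = s.rename({1: 3, 2: 5})      # mapping: relabels matching index entries
print(s.index.tolist(), s.name)  # [0, 3, 4] my_name
```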
  {
    "library": "pandas",
    "name": "external_values",
    "source_code": "def external_values(values: ArrayLike) -> ArrayLike:\n    if isinstance(values, (PeriodArray, IntervalArray)):\n        return values.astype(object)\n    elif isinstance(values, (DatetimeArray, TimedeltaArray)):\n        values = values._ndarray\n    if isinstance(values, np.ndarray):\n        values = values.view()\n        values.flags.writeable = False\n    return values",
    "docstring": "The array that Series.values returns (public attribute). This has some historical constraints, and is overridden in block subclasses to return the correct array (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray instead of proper extension array).",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:external_values arg:values arguments arg If Call Return return:yes Call If Call Assign If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "tick_params",
    "source_code": "def tick_params(self, axis='both', **kwargs):\n    _api.check_in_list(['x', 'y', 'z', 'both'], axis=axis)\n    if axis in ['x', 'y', 'both']:\n        super().tick_params(axis, **kwargs)\n    if axis in ['z', 'both']:\n        zkw = dict(kwargs)\n        zkw.pop('top', None)\n        zkw.pop('bottom', None)\n        zkw.pop('labeltop', None)\n        zkw.pop('labelbottom', None)\n        self.zaxis.set_tick_params(**zkw)",
    "docstring": "Convenience method for changing the appearance of ticks and tick labels. See for full documentation. Because this function applies to 3D Axes, *axis* can also be set to 'z', and setting *axis* to 'both' autoscales all three axes. Also, because of how Axes3D objects are drawn very differently from regular 2D Axes, some of these settings may have ambiguous meaning. For simplicity, the 'z' axis will accept settings as if it was like the 'y' axis. .. note:: Axes3D currently ignores some of these settings.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:tick_params arg:self arg:axis arguments arg arg arg Call If Compare Call Call If Compare Assign Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_process_quantiles",
    "source_code": "def _process_quantiles(self, x, dim):\n    x = np.asarray(x, dtype=float)\n    if x.ndim == 0:\n        x = x * np.eye(dim)[:, :, np.newaxis]\n    if x.ndim == 1:\n        if dim == 1:\n            x = x[np.newaxis, np.newaxis, :]\n        else:\n            x = np.diag(x)[:, :, np.newaxis]\n    elif x.ndim == 2:\n        if not x.shape[0] == x.shape[1]:\n            raise ValueError(f'Quantiles must be square if they are two dimensional, but x.shape = {str(x.shape)}.')\n        x = x[:, :, np.newaxis]\n    elif x.ndim == 3:\n        if not x.shape[0] == x.shape[1]:\n            raise ValueError(f'Quantiles must be square in the first two dimensions if they are three dimensional, but x.shape = {str(x.shape)}.')\n    elif x.ndim > 3:\n        raise ValueError(f'Quantiles must be at most two-dimensional with an additional dimension for multiple components, but x.ndim = {x.ndim}')\n    if not x.shape[0:2] == (dim, dim):\n        raise ValueError(f'Quantiles have incompatible dimensions: should be {(dim, dim)}, got {x.shape[0:2]}.')\n    return x",
    "docstring": "Adjust quantiles array so that last axis labels the components of each data point.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_quantiles arg:self arg:x arg:dim arguments arg arg arg Assign Call If Compare Assign Call If Compare If Compare Assign Assign Call If Compare If Compare Raise Call Call Assign If Compare If Compare Raise Call Call If Compare Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "prepare_settings",
    "source_code": "def prepare_settings(self, docname: str) -> None:\n    self.current_document = _CurrentDocument(docname=docname, default_role=self.config.default_role, default_domain=self.domains.get(self.config.primary_domain))",
    "docstring": "Prepare to set up environment for reading.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:prepare_settings arg:self arg:docname arguments arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, method_name: str, args: Optional[Sequence[core_tf_types.Tensor]]=None, output_specs=None, timeout_in_ms=0):\n    if args is None:\n        args = []\n    status_or, deleter = gen_rpc_ops.rpc_call(self._client_handle, args=nest.flatten(args), method_name=method_name, timeout_in_ms=timeout_in_ms)\n    return StatusOrResult(status_or, deleter, output_specs)",
    "docstring": "Method to invoke remote registered functions on the connected server. Server should be started before making an RPC Call. Args: method_name: Registered method to invoke on Server. args: Input arguments for the method. output_specs: Output specs for the output from method. timeout_in_ms: Timeout for this call. If 0, default client timeout will be used. Returns: StatusOrResult object. This function issues the RPC call to server, it does not block for the duration of RPC. Please call is_ok, get_error or get_value methods on the returned object to blocked till RPC finishes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:call arg:self arg:method_name arg:args arg:output_specs arg:timeout_in_ms arguments arg arg arg arg arg If Compare Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "orth",
    "source_code": "@_apply_over_batch(('A', 2))\ndef orth(A, rcond=None):\n    u, s, vh = svd(A, full_matrices=False)\n    M, N = (u.shape[0], vh.shape[1])\n    if rcond is None:\n        rcond = np.finfo(s.dtype).eps * max(M, N)\n    tol = np.amax(s, initial=0.0) * rcond\n    num = np.sum(s > tol, dtype=int)\n    Q = u[:, :num]\n    return Q",
    "docstring": "Construct an orthonormal basis for the range of A using SVD Parameters ---------- A : (M, N) array_like Input array rcond : float, optional Relative condition number. Singular values `` are considered zero. Default: floating point eps * max(M,N). Returns ------- Q : (M, K) ndarray Orthonormal basis for the range of A. K = effective rank of A, as determined by rcond See Also -------- svd : Singular value decomposition of a matrix null_space : Matrix null space Examples -------- >>> import numpy as np >>> from scipy.linalg import orth >>> A = np.array([[2, 0, 0], [0, 5, 0]]) # rank 2 array >>> orth(A) array([[0., 1.], [1., 0.]]) >>> orth(A.T) array([[0., 1.], [1., 0.], [0., 0.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp_svd.py",
    "ast_data": "FunctionDef name:orth arg:A arg:rcond arguments arg arg Assign Call Assign If Compare Assign Call Call Assign Call Assign Call Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "report_uninitialized_variables",
    "source_code": "@tf_export(v1=['report_uninitialized_variables'])\n@tf_should_use.should_use_result\ndef report_uninitialized_variables(var_list=None, name='report_uninitialized_variables'):\n    if var_list is None:\n        var_list = global_variables() + local_variables()\n        if not var_list:\n            var_list = []\n            for op in ops.get_default_graph().get_operations():\n                if op.type in ['Variable', 'VariableV2', 'AutoReloadVariable']:\n                    var_list.append(op.outputs[0])\n    with ops.name_scope(name):\n        if var_list:\n            init_vars = [state_ops.is_variable_initialized(v) for v in var_list]\n        local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu:0')\n        with ops.device(local_device):\n            if not var_list:\n                return array_ops.constant([], dtype=dtypes.string)\n            else:\n                variables_mask = math_ops.logical_not(array_ops_stack.stack(init_vars))\n                variable_names_tensor = array_ops.constant([s.op.name for s in var_list])\n                return array_ops.boolean_mask(variable_names_tensor, variables_mask)",
    "docstring": "Adds ops to list the names of uninitialized variables. When run, it returns a 1-D tensor containing the names of uninitialized variables if there are any, or an empty array if there are none. Args: var_list: List of objects to check. Defaults to the value of name: Optional name of the . Returns: A 1-D tensor containing names of the uninitialized variables, or an empty 1-D tensor if there are no variables or no uninitialized variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:report_uninitialized_variables arg:var_list arg:name arguments arg arg If Compare Assign Call Call If Assign For Call Call If Compare Call With Call If Assign Call Assign Call With Call If Return return:yes Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "results_from_detections",
    "source_code": "def results_from_detections(detections: Tensor, format: str | BoundingBoxDataFormat) -> list[ObjectDetectorResult]:\n    KORNIA_CHECK_SHAPE(detections, ['D', '6'])\n    if isinstance(format, str):\n        format = BoundingBoxDataFormat[format.upper()]\n    results: list[ObjectDetectorResult] = []\n    for det in detections:\n        det = det.squeeze().tolist()\n        if len(det) != 6:\n            continue\n        results.append(ObjectDetectorResult(class_id=int(det[0]), confidence=det[1], bbox=BoundingBox(data=(det[2], det[3], det[4], det[5]), data_format=format)))\n    return results",
    "docstring": "Convert a detection tensor to a list of :py:class:. Args: detections: tensor with shape :math:, where :math: is the number of detections in the given image, :math: represents class id, score, and bounding box. format: detection format. Returns: list of :py:class:.",
    "type": "function",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "FunctionDef name:results_from_detections arg:detections arg:format arguments arg arg Call If Call Assign Call For Assign Call Call If Compare Call Call Call Call Call Return return:yes"
  },
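A hedged example; the `"xywh"` format name is an assumption about `BoundingBoxDataFormat`'s members, and each row follows the `[class_id, score, x, y, w, h]` layout the function slices out:

```python
import torch
from kornia.models.detection.base import results_from_detections

# Two fake detections: [class_id, score, 4 bbox values] per row.
dets = torch.tensor([
    [0.0, 0.9, 10.0, 20.0, 50.0, 60.0],
    [2.0, 0.4, 5.0, 5.0, 15.0, 25.0],
])
results = results_from_detections(dets, format="xywh")  # assumed format name
print(results[0].class_id, results[0].confidence)
```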
  {
    "library": "pytorch",
    "name": "save_cache_artifacts",
    "source_code": "def save_cache_artifacts() -> Optional[tuple[bytes, 'CacheInfo']]:\n    from ._cache import CacheArtifactManager, CacheInfo\n    return CacheArtifactManager.serialize()",
    "docstring": "Serializes all the cache artifacts that were created during the compilation Example: - Execute torch.compile - Call torch.compiler.save_cache_artifacts()",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:save_cache_artifacts arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "host_device",
    "source_code": "def host_device(self, replica: int=0, logical_core: int=0, job: Optional[str]=None) -> str:\n    coordinates = self.coordinates(replica, logical_core)\n    return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)",
    "docstring": "Returns the CPU device attached to a logical core.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\device_assignment.py",
    "ast_data": "FunctionDef name:host_device arg:self arg:replica arg:logical_core arg:job arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read_model_from_bytearray",
    "source_code": "def read_model_from_bytearray(model_bytearray):\n    model = convert_bytearray_to_object(model_bytearray)\n    if sys.byteorder == 'big':\n        byte_swap_tflite_model_obj(model, 'little', 'big')\n    for buffer in model.buffers:\n        if buffer.offset:\n            buffer.data = model_bytearray[buffer.offset:buffer.offset + buffer.size]\n            buffer.offset = 0\n            buffer.size = 0\n    for subgraph in model.subgraphs:\n        for op in subgraph.operators:\n            if op.largeCustomOptionsOffset:\n                op.customOptions = model_bytearray[op.largeCustomOptionsOffset:op.largeCustomOptionsOffset + op.largeCustomOptionsSize]\n                op.largeCustomOptionsOffset = 0\n                op.largeCustomOptionsSize = 0\n    return model",
    "docstring": "Reads a tflite model as a python object. Args: model_bytearray: TFLite model in bytearray format. Returns: A python object corresponding to the input tflite file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:read_model_from_bytearray arg:model_bytearray arguments arg Assign Call If Compare Call For If Assign Assign Assign For For If Assign Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "is_language_prefix_patterns_used",
    "source_code": "@functools.cache\ndef is_language_prefix_patterns_used(urlconf):\n    for url_pattern in get_resolver(urlconf).url_patterns:\n        if isinstance(url_pattern.pattern, LocalePrefixPattern):\n            return (True, url_pattern.pattern.prefix_default_language)\n    return (False, False)",
    "docstring": "Return a tuple of two booleans: ( if i18n_patterns() (LocalePrefixPattern) is used in the URLconf, if the default language should be prefixed )",
    "type": "function",
    "file_path": "django\\django\\conf\\urls\\i18n.py",
    "ast_data": "FunctionDef name:is_language_prefix_patterns_used arg:urlconf arguments arg For Call If Call Return return:yes Return return:yes"
  },
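For context, a minimal URLconf of the shape this helper inspects; `i18n_patterns()` is the public wrapper that installs a `LocalePrefixPattern`, and `prefix_default_language` feeds the second boolean of the returned tuple. A sketch, assuming a configured Django project with `USE_I18N = True`:

```python
# urls.py sketch
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponse
from django.urls import path

urlpatterns = i18n_patterns(
    path("about/", lambda request: HttpResponse("about")),
    prefix_default_language=False,  # second boolean of the returned tuple
)
```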
  {
    "library": "matplotlib",
    "name": "_process_projection_requirements",
    "source_code": "def _process_projection_requirements(self, *, axes_class=None, polar=False, projection=None, **kwargs):\n    if axes_class is not None:\n        if polar or projection is not None:\n            raise ValueError(\"Cannot combine 'axes_class' and 'projection' or 'polar'\")\n        projection_class = axes_class\n    else:\n        if polar:\n            if projection is not None and projection != 'polar':\n                raise ValueError(f'polar={polar}, yet projection={projection!r}. Only one of these arguments should be supplied.')\n            projection = 'polar'\n        if isinstance(projection, str) or projection is None:\n            projection_class = projections.get_projection_class(projection)\n        elif hasattr(projection, '_as_mpl_axes'):\n            projection_class, extra_kwargs = projection._as_mpl_axes()\n            kwargs.update(**extra_kwargs)\n        else:\n            raise TypeError(f'projection must be a string, None or implement a _as_mpl_axes method, not {projection!r}')\n    return (projection_class, kwargs)",
    "docstring": "Handle the args/kwargs to add_axes/add_subplot/gca, returning:: (axes_proj_class, proj_class_kwargs) which can be used for new Axes initialization/identification.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:_process_projection_requirements arg:self arguments arg arg arg arg arg If Compare If BoolOp Compare Raise Call Assign If If BoolOp Compare Compare Raise Call Assign If BoolOp Call Compare Assign Call If Call Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_annotation_clip",
    "source_code": "def get_annotation_clip(self):\n    return self._annotation_clip",
    "docstring": "Return the clipping behavior. See for the meaning of the return value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_annotation_clip arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "forms",
    "source_code": "@cached_property\ndef forms(self):\n    return [self._construct_form(i, **self.get_form_kwargs(i)) for i in range(self.total_form_count())]",
    "docstring": "Instantiate forms at first property access.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:forms arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_reorder_for_extension_array_stack",
    "source_code": "def _reorder_for_extension_array_stack(arr: ExtensionArray, n_rows: int, n_columns: int) -> ExtensionArray:\n    idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.reshape(-1)\n    return arr.take(idx)",
    "docstring": "Re-orders the values when stacking multiple extension-arrays. The indirect stacking method used for EAs requires a followup take to get the order correct. Parameters ---------- arr : ExtensionArray n_rows, n_columns : int The number of rows and columns in the original DataFrame. Returns ------- taken : ExtensionArray The original with elements re-ordered appropriately Examples -------- >>> arr = np.array([\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]) >>> _reorder_for_extension_array_stack(arr, 2, 3) array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='>> _reorder_for_extension_array_stack(arr, 3, 2) array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\reshape.py",
    "ast_data": "FunctionDef name:_reorder_for_extension_array_stack arg:arr arg:n_rows arg:n_columns arguments arg arg arg Assign Call Call Call Return return:yes Call"
  },
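The take-order is easy to verify with a plain ndarray (the helper itself expects an ExtensionArray, so this only illustrates the index math):

```python
import numpy as np

arr = np.array(["a", "b", "c", "d", "e", "f"])
n_rows, n_columns = 2, 3
# Column-major walk of a (n_rows, n_columns) layout, flattened back out.
idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.reshape(-1)
print(idx)            # [0 2 4 1 3 5]
print(arr.take(idx))  # ['a' 'c' 'e' 'b' 'd' 'f']
```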
  {
    "library": "pandas",
    "name": "select_column",
    "source_code": "def select_column(self, key: str, column: str, start: int | None=None, stop: int | None=None):\n    tbl = self.get_storer(key)\n    if not isinstance(tbl, Table):\n        raise TypeError('can only read_column with a table')\n    return tbl.read_column(column=column, start=start, stop=stop)",
    "docstring": "return a single column from the table. This is generally only useful to select an indexable .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the \"fixed\" format. Loading pickled data received from untrusted sources can be unsafe. See: for more. Parameters ---------- key : str column : str The column of interest. start : int or None, default None stop : int or None, default None Raises ------ raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block)",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:select_column arg:self arg:key arg:column arg:start arg:stop arguments arg arg arg arg arg Assign Call If Call Raise Call Return return:yes Call"
  },
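A hedged usage sketch (requires PyTables installed); only "table"-format stores support column selection, and a fixed-format store raises the `TypeError` above:

```python
import pandas as pd

df = pd.DataFrame({"a": range(5)})
with pd.HDFStore("demo.h5", mode="w") as store:
    store.put("df", df, format="table")   # "fixed" format would raise
    col = store.select_column("df", "index")
print(col.tolist())  # [0, 1, 2, 3, 4]
```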
  {
    "library": "scikit-learn",
    "name": "contingency_matrix",
    "source_code": "@validate_params({'labels_true': ['array-like', None], 'labels_pred': ['array-like', None], 'eps': [Interval(Real, 0, None, closed='left'), None], 'sparse': ['boolean'], 'dtype': 'no_validation'}, prefer_skip_nested_validation=True)\ndef contingency_matrix(labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64):\n    if eps is not None and sparse:\n        raise ValueError(\"Cannot set 'eps' when sparse=True\")\n    classes, class_idx = np.unique(labels_true, return_inverse=True)\n    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)\n    n_classes = classes.shape[0]\n    n_clusters = clusters.shape[0]\n    contingency = sp.coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=dtype)\n    if sparse:\n        contingency = contingency.tocsr()\n        contingency.sum_duplicates()\n    else:\n        contingency = contingency.toarray()\n        if eps is not None:\n            contingency = contingency + eps\n    return contingency",
    "docstring": "Build a contingency matrix describing the relationship between labels. Read more in the :ref:. Parameters ---------- labels_true : array-like of shape (n_samples,) Ground truth class labels to be used as a reference. labels_pred : array-like of shape (n_samples,) Cluster labels to evaluate. eps : float, default=None If a float, that value is added to all values in the contingency matrix. This helps to stop NaN propagation. If `TrueepsNonesparseTrueepsNoneCC_{i, j}ij`. Examples -------- >>> from sklearn.metrics.cluster import contingency_matrix >>> labels_true = [0, 0, 1, 1, 2, 2] >>> labels_pred = [1, 0, 2, 1, 0, 2] >>> contingency_matrix(labels_true, labels_pred) array([[1, 1, 0], [0, 1, 1], [1, 0, 1]])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\cluster\\_supervised.py",
    "ast_data": "FunctionDef name:contingency_matrix arg:labels_true arg:labels_pred arguments arg arg arg arg arg If BoolOp Compare Raise Call Assign Call Assign Call Assign Assign Assign Call Call If Assign Call Call Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "LoadContext",
    "source_code": "class LoadContext(threading.local):\n\n    def __init__(self):\n        super().__init__()\n        self._entered_load_context = []\n        self._load_options = None\n\n    def set_load_options(self, load_options):\n        self._load_options = load_options\n        self._entered_load_context.append(True)\n\n    def clear_load_options(self):\n        self._load_options = None\n        self._entered_load_context.pop()\n\n    def load_options(self):\n        return self._load_options\n\n    def in_load_context(self):\n        return self._entered_load_context",
    "docstring": "A context for loading a model.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\load_context.py",
    "ast_data": "ClassDef name:LoadContext FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign FunctionDef name:set_load_options arg:self arg:load_options arguments arg arg Assign Call FunctionDef name:clear_load_options arg:self arguments arg Assign Call FunctionDef name:load_options arg:self arguments arg Return return:yes FunctionDef name:in_load_context arg:self arguments arg Return return:yes"
  },
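A usage sketch, assuming the `LoadContext` class exactly as defined above; `in_load_context()` returns the underlying list, which is truthy only while at least one enter is outstanding:

```python
ctx = LoadContext()
ctx.set_load_options({"experimental_io_device": "/job:localhost"})  # illustrative options
assert ctx.in_load_context()
print(ctx.load_options())
ctx.clear_load_options()
assert not ctx.in_load_context()  # empty list is falsy
```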
  {
    "library": "pytorch",
    "name": "_AllreduceUpcastHookState",
    "source_code": "@dataclass\nclass _AllreduceUpcastHookState:\n    ddp_weakref: Any\n    upcast_stream: torch.Stream\n    wait_for_stream_enqueued: bool = False",
    "docstring": "State to manage DDP mixed precision in backward / gradient communication. This contains a weakref to the DDP module for access to reducer and process group, and a stream to run parameter and gradient upcasts.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\mixed_precision_hooks.py",
    "ast_data": "ClassDef name:_AllreduceUpcastHookState"
  },
  {
    "library": "tensorflow",
    "name": "_default_inner_shape_for_pylist",
    "source_code": "def _default_inner_shape_for_pylist(pylist, ragged_rank):\n\n    def get_inner_shape(item):\n        if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:\n            return ()\n        elif len(item) > 0:\n            return (len(item),) + get_inner_shape(item[0])\n        return (0,)\n\n    def check_inner_shape(item, shape):\n        is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0\n        if is_nested != bool(shape):\n            raise ValueError('inner values have inconsistent shape')\n        if is_nested:\n            if shape[0] != len(item):\n                raise ValueError('inner values have inconsistent shape')\n            for child in item:\n                check_inner_shape(child, shape[1:])\n    flat_values = pylist\n    for dim in range(ragged_rank):\n        if not all((isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values)):\n            raise ValueError('pylist has scalar values depth %d, but ragged_rank=%d requires scalar value depth greater than %d' % (dim + 1, ragged_rank, ragged_rank))\n        flat_values = sum((list(v) for v in flat_values), [])\n    inner_shape = get_inner_shape(flat_values)\n    check_inner_shape(flat_values, inner_shape)\n    return inner_shape[1:]",
    "docstring": "Computes a default inner shape for the given python list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_factory_ops.py",
    "ast_data": "FunctionDef name:_default_inner_shape_for_pylist arg:pylist arg:ragged_rank arguments arg arg FunctionDef name:get_inner_shape arg:item arguments arg If BoolOp Call Compare Call Return return:no If Compare Call Return return:yes Call Call Return return:yes FunctionDef name:check_inner_shape arg:item arg:shape arguments arg arg Assign BoolOp Call Compare Call If Compare Call Raise Call If If Compare Call Raise Call For Call Assign For Call If Call BoolOp Call Compare Call Raise Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_different_canvas",
    "source_code": "def _different_canvas(self, event):\n    return getattr(event, 'canvas', None) is not None and (fig := self.get_figure(root=True)) is not None and (event.canvas is not fig.canvas)",
    "docstring": "Check whether an *event* occurred on a canvas other that this artist's canvas. If this method returns True, the event definitely occurred on a different canvas; if it returns False, either it occurred on the same canvas, or we may not have enough information to know. Subclasses should start their definition of as follows:: if self._different_canvas(mouseevent): return False, {} # subclass-specific implementation follows",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_different_canvas arg:self arg:event arguments arg arg Return return:yes BoolOp Compare Call Compare Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_generate_tracing_options",
    "source_code": "def _generate_tracing_options(self, fn, scope_type):\n    attributes = self._attributes.copy()\n    share = self._shared_rendezvous\n    if share is not None:\n        attributes[attributes_lib.SHARED_RENDEZVOUS] = share\n    if self._jit_compile is not None:\n        attributes[attributes_lib.XLA_COMPILE] = bool(self._jit_compile)\n        if self._jit_compile:\n            attributes[attributes_lib.NO_INLINE] = True\n    if self._autograph:\n        fn = autograph_util.py_func_from_autograph(fn, self._experimental_autograph_options)\n    return tracing_compilation.TracingOptions(fn, self._name, polymorphic_type=self._function_type, default_values=self._default_values, scope_type=scope_type, attributes=attributes, autograph=self._autograph, reduce_retracing=self._reduce_retracing, autograph_options=self._experimental_autograph_options, function_cache=self._function_cache, function_captures=self._function_captures, lock=self._lock)",
    "docstring": "Return a TracingOptions catered to the input function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_generate_tracing_options arg:self arg:fn arg:scope_type arguments arg arg arg Assign Call Assign If Compare Assign If Compare Assign Call If Assign If Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "rfc4514_string",
    "source_code": "def rfc4514_string(self, attr_name_overrides: _OidNameMap | None=None) -> str:\n    return '+'.join((attr.rfc4514_string(attr_name_overrides) for attr in self._attributes))",
    "docstring": "Format as RFC4514 Distinguished Name string. Within each RDN, attributes are joined by '+', although that is rarely used in certificates.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\name.py",
    "ast_data": "FunctionDef name:rfc4514_string arg:self arg:attr_name_overrides arguments arg arg Return return:yes Call Call"
  },
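A hedged example with the public x509 API; within one RDN the attributes are joined by '+', as the docstring notes (attribute ordering in the output follows RFC 4514):

```python
from cryptography import x509
from cryptography.x509.oid import NameOID

rdn = x509.RelativeDistinguishedName([
    x509.NameAttribute(NameOID.COMMON_NAME, "example.com"),
    x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Example Corp"),
])
print(rdn.rfc4514_string())  # attributes joined with '+'
```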
  {
    "library": "numpy",
    "name": "startswith",
    "source_code": "@set_module('numpy.strings')\ndef startswith(a, prefix, start=0, end=None):\n    end = end if end is not None else MAX\n    return _startswith_ufunc(a, prefix, start, end)",
    "docstring": "Returns a boolean array which is where the string element in `False`, stop comparing at that position. Returns ------- out : ndarray Output array of bools See Also -------- str.startswith Examples -------- >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> np.strings.startswith(s, 'fo') array([True, False]) >>> np.strings.startswith(s, 'o', start=1, end=2) array([True, False])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:startswith arg:a arg:prefix arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "InternalCastContext",
    "source_code": "class InternalCastContext(trace.CastContext):\n\n    def __init__(self, allow_specs=False):\n        self._allow_specs = allow_specs\n\n    @property\n    def allow_specs(self) -> bool:\n        return self._allow_specs",
    "docstring": "Default casting behaviors.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\trace_type_builder.py",
    "ast_data": "ClassDef name:InternalCastContext FunctionDef name:__init__ arg:self arg:allow_specs arguments arg arg Assign FunctionDef name:allow_specs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name_scope",
    "source_code": "@doc_controls.do_not_generate_docs\ndef name_scope(name):\n    return ops.name_scope_v2(name)",
    "docstring": "A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called : def my_op(a): with tf.name_scope(\"MyOp\") as scope: a = tf.convert_to_tensor(a, name=\"a\") # Define some computation that uses . return foo_op(..., name=scope) When executed, the Tensor will have the name . Args: name: The prefix to use on all names created within the name scope. Returns: Name scope context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:name_scope arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "loggers_set_save_activations",
    "source_code": "def loggers_set_save_activations(model: torch.nn.Module, save_activations: bool) -> None:\n    for _name, child in model.named_modules():\n        if isinstance(child, OutputLogger):\n            child.save_activations = save_activations",
    "docstring": "Sets the setting on a 's loggers",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "FunctionDef name:loggers_set_save_activations arg:model arg:save_activations arguments arg arg For Call If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "hann_window",
    "source_code": "@tf_export('signal.hann_window')\n@dispatch.add_dispatch_support\ndef hann_window(window_length, periodic=True, dtype=dtypes.float32, name=None):\n    return _raised_cosine_window(name, 'hann_window', window_length, periodic, dtype, 0.5, 0.5)",
    "docstring": "Generate a [Hann window][hann]. Args: window_length: A scalar indicating the window length to generate. periodic: A bool indicating whether to generate a periodic or symmetric window. Periodic windows are typically used for spectral analysis while symmetric windows are typically used for digital filter design. dtype: The data type to produce. Must be a floating point type. name: An optional name for the operation. Returns: A of shape of type . Raises: ValueError: If is not a floating point type. [hann]:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\window_ops.py",
    "ast_data": "FunctionDef name:hann_window arg:window_length arg:periodic arg:dtype arg:name arguments arg arg arg arg Return return:yes Call Call"
  },
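Public-API usage; a periodic window is the usual choice for STFT-style spectral analysis:

```python
import tensorflow as tf

w = tf.signal.hann_window(window_length=8, periodic=True)
print(w.shape)    # (8,)
print(w.numpy())  # raised-cosine taper starting at 0
```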
  {
    "library": "tensorflow",
    "name": "_is_known_dtype",
    "source_code": "def _is_known_dtype(dt):\n    return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)",
    "docstring": "Helper returning True if dtype is known.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:_is_known_dtype arg:dt arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "do_include",
    "source_code": "@register.tag('include')\ndef do_include(parser, token):\n    bits = token.split_contents()\n    if len(bits) < 2:\n        raise TemplateSyntaxError('%r tag takes at least one argument: the name of the template to be included.' % bits[0])\n    options = {}\n    remaining_bits = bits[2:]\n    while remaining_bits:\n        option = remaining_bits.pop(0)\n        if option in options:\n            raise TemplateSyntaxError('The %r option was specified more than once.' % option)\n        if option == 'with':\n            value = token_kwargs(remaining_bits, parser, support_legacy=False)\n            if not value:\n                raise TemplateSyntaxError('\"with\" in %r tag needs at least one keyword argument.' % bits[0])\n        elif option == 'only':\n            value = True\n        else:\n            raise TemplateSyntaxError('Unknown argument for %r tag: %r.' % (bits[0], option))\n        options[option] = value\n    isolated_context = options.get('only', False)\n    namemap = options.get('with', {})\n    bits[1] = construct_relative_path(parser.origin.template_name, bits[1], allow_recursion=True)\n    return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap, isolated_context=isolated_context)",
    "docstring": "Load a template and render it with the current context. You can pass additional context using keyword arguments. Example:: {% include \"foo/some_include\" %} {% include \"foo/some_include\" with bar=\"BAZZ!\" baz=\"BING!\" %} Use the `` argument to exclude the current context when rendering the included template:: {% include \"foo/some_include\" only %} {% include \"foo/some_include\" with bar=\"1\" only %}",
    "type": "function",
    "file_path": "django\\django\\template\\loader_tags.py",
    "ast_data": "FunctionDef name:do_include arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Assign While Assign Call If Compare Raise Call If Compare Assign Call If Raise Call If Compare Assign Raise Call Assign Assign Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "draw_first_k_couples",
    "source_code": "def draw_first_k_couples(k: int, rdims: Tensor, dv: torch.device) -> Tensor:\n    max_exhaustive_search = int(math.sqrt(2 * k + 0.25) - 0.5)\n    residual_search = int(k - max_exhaustive_search * (max_exhaustive_search + 1) / 2)\n    repeats = torch.cat([torch.arange(max_exhaustive_search, dtype=torch.long, device=dv) + 1, torch.tensor([residual_search], dtype=torch.long, device=dv)])\n    idx_sequence = torch.stack([repeats.repeat_interleave(repeats), arange_sequence(repeats)], dim=-1)\n    return torch.remainder(idx_sequence.unsqueeze(-1), rdims)",
    "docstring": "Returns first k couples. Exhaustive search over the first n samples: * n(n+1)/2 = n2/2 + n/2 couples Max n for which we can exhaustively sample with k couples: * n2/2 + n/2 = k * n = sqrt(1/4 + 2k)-1/2 = (sqrt(8k+1)-1)/2",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:draw_first_k_couples arg:k arg:rdims arg:dv arguments arg arg arg Assign Call Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call Call"
  },
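A quick numeric check of the docstring's formula, in plain Python:

```python
import math

k = 10  # total couples requested
n = int(math.sqrt(2 * k + 0.25) - 0.5)  # == (sqrt(8k + 1) - 1) / 2
residual = k - n * (n + 1) // 2         # couples left after exhaustive rows
print(n, residual)  # 4 0 -> rows 1..4 give exactly 4*5/2 = 10 couples
```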
  {
    "library": "pytorch",
    "name": "set_backend_pattern_config",
    "source_code": "def set_backend_pattern_config(self, config: BackendPatternConfig) -> BackendConfig:\n    pattern_complex_format = torch.ao.quantization.backend_config.utils._get_pattern_in_reversed_nested_tuple_format(config)\n    self._pattern_complex_format_to_config[pattern_complex_format] = config\n    return self",
    "docstring": "Set the config for an pattern that can be run on the target backend. This overrides any existing config for the given pattern.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_backend_pattern_config arg:self arg:config arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_asset_to_collection",
    "source_code": "def _add_asset_to_collection(asset_filename, asset_tensor):\n    asset_proto = meta_graph_pb2.AssetFileDef()\n    asset_proto.filename = asset_filename\n    asset_proto.tensor_info.name = asset_tensor.name\n    asset_any_proto = Any()\n    asset_any_proto.Pack(asset_proto)\n    ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)",
    "docstring": "Builds an asset proto and adds it to the asset collection of the graph. Args: asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_add_asset_to_collection arg:asset_filename arg:asset_tensor arguments arg arg Assign Call Assign Assign Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "Rt_to_matrix4x4",
    "source_code": "def Rt_to_matrix4x4(R: Tensor, t: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(R, ['B', '3', '3'])\n    KORNIA_CHECK_SHAPE(t, ['B', '3', '1'])\n    Rt = concatenate([R, t], dim=2)\n    return convert_affinematrix_to_homography3d(Rt)",
    "docstring": "Combine 3x3 rotation matrix R and 1x3 translation vector t into 4x4 extrinsics. Args: R: Rotation matrix, :math: t: Translation matrix :math:. Returns: the extrinsics :math:. Example: >>> R, t = torch.eye(3)[None], torch.ones(3).reshape(1, 3, 1) >>> Rt_to_matrix4x4(R, t) tensor([[[1., 0., 0., 1.], [0., 1., 0., 1.], [0., 0., 1., 1.], [0., 0., 0., 1.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:Rt_to_matrix4x4 arg:R arg:t arguments arg arg Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "breakpoint",
    "source_code": "def breakpoint(rank: int=0, skip: int=0):\n    if skip > 0:\n        key = hash(str(traceback.format_exc()))\n        counter = _breakpoint_cache.get(key, 0) + 1\n        _breakpoint_cache[key] = counter\n        if counter <= skip:\n            log.warning('Skip the breakpoint, counter=%d', counter)\n            return\n    if get_rank() == rank:\n        pdb = _DistributedPdb()\n        pdb.message(f\"\\n!!! ATTENTION !!!\\n\\nType 'up' to get to the frame that called dist.breakpoint(rank={rank})\\n\")\n        pdb.set_trace()\n    meta_in_tls = torch._C._meta_in_tls_dispatch_include()\n    guard = torch._C._DisableTorchDispatch()\n    torch._C._set_meta_in_tls_dispatch_include(False)\n    try:\n        barrier()\n    finally:\n        torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)\n        del guard",
    "docstring": "Set a breakpoint, but only on a single rank. All other ranks will wait for you to be done with the breakpoint before continuing. Args: rank (int): Which rank to break on. Default: ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\__init__.py",
    "ast_data": "FunctionDef name:breakpoint arg:rank arg:skip arguments arg arg If Compare Assign Call Call Call Assign Call Assign If Compare Call Return return:no If Compare Call Assign Call Call Call Assign Call Assign Call Call Try Call Call"
  },
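A hedged usage sketch; it assumes a process group is already initialized (e.g. launched with torchrun):

```python
import torch.distributed as dist

def debug_step(x):
    # Rank 0 drops into pdb; every other rank waits at the internal barrier.
    dist.breakpoint(rank=0)
    return x * 2
```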
  {
    "library": "scikit-learn",
    "name": "n_iter_",
    "source_code": "@property\ndef n_iter_(self):\n    check_is_fitted(self)\n    return len(self._predictors)",
    "docstring": "Number of iterations of the boosting process.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:n_iter_ arg:self arguments arg Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "handle_extensions",
    "source_code": "def handle_extensions(extensions):\n    ext_list = []\n    for ext in extensions:\n        ext_list.extend(ext.replace(' ', '').split(','))\n    for i, ext in enumerate(ext_list):\n        if not ext.startswith('.'):\n            ext_list[i] = '.%s' % ext_list[i]\n    return set(ext_list)",
    "docstring": "Organize multiple extensions that are separated with commas or passed by using --extension/-e multiple times. For example: running 'django-admin makemessages -e js,txt -e xhtml -a' would result in an extension list: ['.js', '.txt', '.xhtml'] >>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']) {'.html', '.js', '.py'} >>> handle_extensions(['.html, txt,.tpl']) {'.html', '.tpl', '.txt'}",
    "type": "function",
    "file_path": "django\\django\\core\\management\\utils.py",
    "ast_data": "FunctionDef name:handle_extensions arg:extensions arguments arg Assign For Call Call Call For Call If Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "acquire",
    "source_code": "@abc.abstractmethod\ndef acquire(self, scope_id: str, expiration_time: float) -> None:\n    pass",
    "docstring": "Acquires a timer for the worker that holds this client object given the scope_id and expiration_time. Typically registers the timer with the TimerServer.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:acquire arg:self arg:scope_id arg:expiration_time arguments arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "register_backend",
    "source_code": "def register_backend(format, backend, description=None):\n    if description is None:\n        description = ''\n    _default_backends[format] = backend\n    _default_filetypes[format] = description",
    "docstring": "Register a backend for saving to a given file format. Parameters ---------- format : str File extension backend : module string or canvas class Backend for handling file output description : str, default: \"\" Description of the file type.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:register_backend arg:format arg:backend arg:description arguments arg arg arg If Compare Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "Y",
    "source_code": "def Y(self):\n    return '%04d' % self.data.year",
    "docstring": "Year, 4 digits with leading zeros; e.g. '1999'.",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:Y arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_add_timedeltalike_scalar",
    "source_code": "def _add_timedeltalike_scalar(self, other):\n    if isna(other):\n        new_values = np.empty(self.shape, dtype='i8').view(self._ndarray.dtype)\n        new_values.fill(iNaT)\n        return type(self)._simple_new(new_values, dtype=self.dtype)\n    self = cast('DatetimeArray | TimedeltaArray', self)\n    other = Timedelta(other)\n    self, other = self._ensure_matching_resos(other)\n    return self._add_timedeltalike(other)",
    "docstring": "Add a delta of a timedeltalike Returns ------- Same type as self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_add_timedeltalike_scalar arg:self arg:other arguments arg arg If Call Assign Call Call Call Return return:yes Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "public_key",
    "source_code": "@abc.abstractmethod\ndef public_key(self) -> X25519PublicKey:\n    pass",
    "docstring": "Returns the public key associated with this private key",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x25519.py",
    "ast_data": "FunctionDef name:public_key arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "insert",
    "source_code": "def insert(x, tck, m=1, per=0):\n    if isinstance(tck, BSpline):\n        t, c, k = tck.tck\n        sh = tuple(range(c.ndim))\n        c = c.transpose(sh[1:] + (0,))\n        t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)\n        c_ = np.asarray(c_)\n        c_ = c_.transpose((sh[-1],) + sh[:-1])\n        return BSpline(t_, c_, k_)\n    else:\n        return _impl.insert(x, tck, m, per)",
    "docstring": "Insert knots into a B-spline. .. legacy:: function Specifically, we recommend constructing a object and using its `mxtckBSpline`t(k+1) >> from scipy.interpolate import splrep, insert >>> import numpy as np >>> x = np.linspace(0, 10, 5) >>> y = np.sin(x) >>> tck = splrep(x, y) >>> tck[0] array([ 0., 0., 0., 0., 5., 10., 10., 10., 10.]) A knot is inserted: >>> tck_inserted = insert(3, tck) >>> tck_inserted[0] array([ 0., 0., 0., 0., 3., 5., 10., 10., 10., 10.]) Some knots are inserted: >>> tck_inserted2 = insert(8, tck, m=3) >>> tck_inserted2[0] array([ 0., 0., 0., 0., 5., 8., 8., 8., 10., 10., 10., 10.])",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_py.py",
    "ast_data": "FunctionDef name:insert arg:x arg:tck arg:m arg:per arguments arg arg arg arg If Call Assign Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "GridDataPeakMem",
    "source_code": "class GridDataPeakMem(Benchmark):\n\n    def setup(self):\n        shape = (7395, 6408)\n        num_nonzero = 488686\n        rng = np.random.default_rng(1234)\n        random_rows = rng.integers(0, shape[0], num_nonzero)\n        random_cols = rng.integers(0, shape[1], num_nonzero)\n        random_values = rng.random(num_nonzero, dtype=np.float32)\n        sparse_matrix = csr_matrix((random_values, (random_rows, random_cols)), shape=shape, dtype=np.float32)\n        sparse_matrix = sparse_matrix.toarray()\n        self.coords = np.column_stack(np.nonzero(sparse_matrix))\n        self.values = sparse_matrix[self.coords[:, 0], self.coords[:, 1]]\n        self.grid_x, self.grid_y = np.mgrid[0:sparse_matrix.shape[0], 0:sparse_matrix.shape[1]]\n\n    def peakmem_griddata(self):\n        interpolate.griddata(self.coords, self.values, (self.grid_x, self.grid_y), method='cubic')",
    "docstring": "Benchmark based on",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:GridDataPeakMem FunctionDef name:setup arg:self arguments arg Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Assign FunctionDef name:peakmem_griddata arg:self arguments arg Call"
  },
  {
    "library": "numpy",
    "name": "_checknames",
    "source_code": "def _checknames(descr, names=None):\n    ndescr = len(descr)\n    default_names = [f'f{i}' for i in range(ndescr)]\n    if names is None:\n        new_names = default_names\n    else:\n        if isinstance(names, (tuple, list)):\n            new_names = names\n        elif isinstance(names, str):\n            new_names = names.split(',')\n        else:\n            raise NameError(f'illegal input names {names!r}')\n        nnames = len(new_names)\n        if nnames < ndescr:\n            new_names += default_names[nnames:]\n    ndescr = []\n    for n, d, t in zip(new_names, default_names, descr.descr):\n        if n in reserved_fields:\n            if t[0] in reserved_fields:\n                ndescr.append((d, t[1]))\n            else:\n                ndescr.append(t)\n        else:\n            ndescr.append((n, t[1]))\n    return np.dtype(ndescr)",
    "docstring": "Checks that field names `names` is not None, updates the field names to valid names.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:_checknames arg:descr arg:names arguments arg arg Assign Call Assign Call If Compare Assign If Call Assign If Call Assign Call Raise Call Assign Call If Compare Assign For Call If Compare If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "ks_2samp",
    "source_code": "@_rename_parameter('mode', 'method')\ndef ks_2samp(data1, data2, alternative='two-sided', method='auto'):\n    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(alternative.lower()[0], alternative)\n    return scipy.stats._stats_py.ks_2samp(data1, data2, alternative=alternative, method=method)",
    "docstring": "Computes the Kolmogorov-Smirnov test on two samples. Missing values in and/or are discarded. Parameters ---------- data1 : array_like First data set data2 : array_like Second data set alternative : {'two-sided', 'less', 'greater'}, optional Indicates the alternative hypothesis. Default is 'two-sided'. method : {'auto', 'exact', 'asymp'}, optional Defines the method used for calculating the p-value. The following options are available (default is 'auto'): * 'auto' : use 'exact' for small size arrays, 'asymp' for large * 'exact' : use approximation to exact distribution of test statistic * 'asymp' : use asymptotic distribution of test statistic Returns ------- d : float Value of the Kolmogorov Smirnov test p : float Corresponding p-value.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:ks_2samp arg:data1 arg:data2 arg:alternative arg:method arguments arg arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_rename_parameter",
    "source_code": "def _rename_parameter(old_names, new_names, dep_version=None):\n\n    def decorator(fun):\n\n        @functools.wraps(fun)\n        def wrapper(*args, **kwargs):\n            __tracebackhide__ = True\n            for old_name, new_name in zip(old_names, new_names):\n                if old_name in kwargs:\n                    if dep_version:\n                        end_version = dep_version.split('.')\n                        end_version[1] = str(int(end_version[1]) + 2)\n                        end_version = '.'.join(end_version)\n                        msg = f'Use of keyword argument `{old_name}` is deprecated and replaced by `{new_name}`. Support for `{old_name}` will be removed in NumPy {end_version}.'\n                        warnings.warn(msg, DeprecationWarning, stacklevel=2)\n                    if new_name in kwargs:\n                        msg = f'{fun.__name__}() got multiple values for argument now known as `{new_name}`'\n                        raise TypeError(msg)\n                    kwargs[new_name] = kwargs.pop(old_name)\n            return fun(*args, **kwargs)\n        return wrapper\n    return decorator",
    "docstring": "Generate decorator for backward-compatible keyword renaming. Apply the decorator generated by to functions with a renamed parameter to maintain backward-compatibility. After decoration, the function behaves as follows: If only the new parameter is passed into the function, behave as usual. If only the old parameter is passed into the function (as a keyword), raise a DeprecationWarning if is provided, and behave as usual otherwise. If both old and new parameters are passed into the function, raise a DeprecationWarning if is provided, and raise the appropriate TypeError (function got multiple values for argument). Parameters ---------- old_names : list of str Old names of parameters new_name : list of str New names of parameters dep_version : str, optional Version of NumPy in which old parameter was deprecated in the format 'X.Y.Z'. If supplied, the deprecation message will indicate that support for the old parameter will be removed in version 'X.Y+2.Z' Notes ----- Untested with functions that accept *args. Probably won't work as written.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\__init__.py",
    "ast_data": "FunctionDef name:_rename_parameter arg:old_names arg:new_names arg:dep_version arguments arg arg arg FunctionDef name:decorator arg:fun arguments arg FunctionDef name:wrapper arguments arg arg Assign For Call If Compare If Assign Call Assign Call Call Assign Call Assign Call If Compare Assign Raise Call Assign Call Return return:yes Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "replace_dyn_with_fresh_var",
    "source_code": "def replace_dyn_with_fresh_var(self, typ):\n    if typ == Dyn:\n        new_symbol = Var(next(self.symbol_iter))\n        return new_symbol\n    elif isinstance(typ, TensorType):\n        new_args = [self.replace_dyn_with_fresh_var(a) for a in typ.__args__]\n        return TensorType(tuple(new_args))\n    elif isinstance(typ, list):\n        return [self.replace_dyn_with_fresh_var(t) for t in typ]\n    elif isinstance(typ, tuple):\n        return (self.replace_dyn_with_fresh_var(t) for t in typ)\n    else:\n        return typ",
    "docstring": "Replace all unknown types with fresh type variables.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:replace_dyn_with_fresh_var arg:self arg:typ arguments arg arg If Compare Assign Call Call Return return:yes If Call Assign Call Return return:yes Call Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "epoch",
    "source_code": "@property\ndef epoch(self) -> int:\n    return self._version.epoch",
    "docstring": "The epoch of the version. >>> Version(\"2.0.0\").epoch 0 >>> Version(\"1!2.0.0\").epoch 1",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:epoch arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_check_compatible_with",
    "source_code": "def _check_compatible_with(self, other: DTScalarOrNaT) -> None:\n    raise AbstractMethodError(self)",
    "docstring": "Verify that and are compatible. * DatetimeArray verifies that the timezones (if any) match * PeriodArray verifies that the freq matches * Timedelta has no verification In each case, NaT is considered compatible. Parameters ---------- other Raises ------ Exception",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_check_compatible_with arg:self arg:other arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_buckets",
    "source_code": "@abc.abstractproperty\ndef _num_buckets(self):\n    pass",
    "docstring": "Returns number of buckets in this sparse feature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_num_buckets arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "TSNEBenchmark",
    "source_code": "class TSNEBenchmark(Estimator, Benchmark):\n    param_names = ['method']\n    params = (['exact', 'barnes_hut'],)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        method, = params\n        n_samples = 500 if method == 'exact' else None\n        return _digits_dataset(n_samples=n_samples)\n\n    def make_estimator(self, params):\n        method, = params\n        estimator = TSNE(random_state=0, method=method)\n        return estimator\n\n    def make_scorers(self):\n        self.train_scorer = lambda _, __: self.estimator.kl_divergence_\n        self.test_scorer = lambda _, __: self.estimator.kl_divergence_",
    "docstring": "Benchmarks for t-SNE.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\manifold.py",
    "ast_data": "ClassDef name:TSNEBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign Assign Compare Return return:yes Call FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Assign arguments arg arg Assign arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "set_handle_props",
    "source_code": "def set_handle_props(self, **handle_props):\n    if not hasattr(self, '_handles_artists'):\n        raise NotImplementedError(\"This selector doesn't have handles.\")\n    artist = self._handles_artists[0]\n    handle_props = cbook.normalize_kwargs(handle_props, artist)\n    for handle in self._handles_artists:\n        handle.set(**handle_props)\n    if self.useblit:\n        self.update()\n    self._handle_props.update(handle_props)",
    "docstring": "Set the properties of the handles selector artist. See the argument in the selector docstring to know which properties are supported.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_handle_props arg:self arguments arg arg If Call Raise Call Assign Assign Call For Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_py_set_item",
    "source_code": "def _py_set_item(target, i, x):\n    target[i] = x\n    return target",
    "docstring": "Overload of set_item that executes a Python list modification.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_py_set_item arg:target arg:i arg:x arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "split_values",
    "source_code": "def split_values(self, value):\n    if isinstance(value, (str, bytes)):\n        values = value.splitlines()\n        if len(values) <= 1:\n            values = value.split(',')\n        values = filter(None, [x.strip() for x in values])\n    else:\n        values = list(value)\n    return values",
    "docstring": "Split the provided value into a list. First this is done by newlines. If there were no newlines in the text, then we next try to split by comma.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\config\\convert.py",
    "ast_data": "FunctionDef name:split_values arg:self arg:value arguments arg arg If Call Assign Call If Compare Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "graph_call_attrs",
    "source_code": "@property\ndef graph_call_attrs(self) -> Dict[str, Any]:\n    attrs = {'is_stateful': self.call_options.is_stateful, 'tout': [o.dtype.as_datatype_enum for o in self.function_type.flat_outputs], 'xla_compile_attr': self.cached_definition.attr.get(attributes_lib.XLA_COMPILE, None)}\n    attrs.update(self._bound_context.function_call_options.as_attrs())\n    return attrs",
    "docstring": "Returns a dictionary of attributes needed to add a call in graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:graph_call_attrs arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_groups=2):\n    if num_groups < 1:\n        raise ValueError(f'Argument `num_groups` must be a positive integer. Received: num_groups={num_groups}')\n    self._ready = threading.Condition(threading.Lock())\n    self._num_groups = num_groups\n    self._group_member_counts = [0] * self._num_groups",
    "docstring": "Initialize a group lock. Args: num_groups: The number of groups that will be accessing the resource under consideration. Should be a positive number. Returns: A group lock that can then be used to synchronize code. Raises: ValueError: If num_groups is less than 1.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_groups arguments arg arg If Compare Raise Call Assign Call Call Assign Assign"
  },
  {
    "library": "sphinx",
    "name": "heading",
    "source_code": "@pass_environment\ndef heading(env: Environment, text: str, level: int=1) -> str:\n    assert level <= 3\n    width = textwidth(text, WIDECHARS[env.language])\n    sectioning_char = SECTIONING_CHARS[level - 1]\n    return f'{text}\\n{sectioning_char * width}'",
    "docstring": "Create a heading for *level*.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\rst.py",
    "ast_data": "FunctionDef name:heading arg:env arg:text arg:level arguments arg arg arg Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "unpack",
    "source_code": "def unpack(self) -> list[BaseSchedulerNode]:\n    for snode in self.snodes:\n        self.scheduler.name_to_fused_node[snode.get_name()] = snode\n    del self.scheduler.name_to_fused_node[self.get_name()]\n    return self.scheduler.fuse_nodes(self.snodes)",
    "docstring": "Do fusion among nodes within this GroupedSchedulerNode, and then unpack this GroupedSchedulerNode into regular nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:unpack arg:self arguments arg For Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_from_proto",
    "source_code": "@classmethod\ndef experimental_from_proto(cls, proto: struct_pb2.TypeSpecProto) -> 'TypeSpec':\n    return nested_structure_coder.decode_proto(struct_pb2.StructuredValue(type_spec_value=proto))",
    "docstring": "Returns a TypeSpec instance based on the serialized proto. Do NOT override for custom non-TF types. Args: proto: Proto generated using 'experimental_as_proto'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:experimental_from_proto arg:cls arg:proto arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_onnx_implemented_overloads",
    "source_code": "def get_onnx_implemented_overloads(registry: _registration.ONNXRegistry) -> list[_registration.TorchOp]:\n    registered_ops: list[_registration.TorchOp] = []\n    for onnx_decomp_meta in registry.functions.values():\n        assert len(onnx_decomp_meta) > 0\n        fx_target = onnx_decomp_meta[0].fx_target\n        registered_ops.append(fx_target)\n    return registered_ops",
    "docstring": "Creates a set of OperatorBase and Callable objects that represent ONNX-supported PyTorch operations. Args: registry: The ONNX registry for PyTorch. Returns: A collection of OperatorBase and Callable objects representing ONNX-supported PyTorch operations.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_decomp.py",
    "ast_data": "FunctionDef name:get_onnx_implemented_overloads arg:registry arguments arg For Call Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_postprocess_for_cut",
    "source_code": "def _postprocess_for_cut(fac, bins, retbins: bool, original):\n    if isinstance(original, ABCSeries):\n        fac = original._constructor(fac, index=original.index, name=original.name)\n    if not retbins:\n        return fac\n    if isinstance(bins, Index) and is_numeric_dtype(bins.dtype):\n        bins = bins._values\n    return (fac, bins)",
    "docstring": "handles post processing for the cut method where we combine the index information if the originally passed datatype was a series",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\tile.py",
    "ast_data": "FunctionDef name:_postprocess_for_cut arg:fac arg:bins arg:retbins arg:original arguments arg arg arg arg If Call Assign Call If Return return:yes If BoolOp Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "handle_template",
    "source_code": "def handle_template(self, template, subdir):\n    if template is None:\n        return os.path.join(django.__path__[0], 'conf', subdir)\n    else:\n        template = template.removeprefix('file://')\n        expanded_template = os.path.expanduser(template)\n        expanded_template = os.path.normpath(expanded_template)\n        if os.path.isdir(expanded_template):\n            return expanded_template\n        if self.is_url(template):\n            absolute_path = self.download(template)\n        else:\n            absolute_path = os.path.abspath(expanded_template)\n        if os.path.exists(absolute_path):\n            return self.extract(absolute_path)\n    raise CommandError(\"couldn't handle %s template %s.\" % (self.app_or_project, template))",
    "docstring": "Determine where the app or project templates are. Use django.__path__[0] as the default because the Django install directory isn't known.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\templates.py",
    "ast_data": "FunctionDef name:handle_template arg:self arg:template arg:subdir arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Call Assign Call If Call Return return:yes If Call Assign Call Assign Call If Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "index_of",
    "source_code": "def index_of(self, file_path, line_number, called_function_name, called_file_path, called_function_start_line):\n    location_key = (file_path, called_function_name, line_number)\n    if location_key in self._location_key_to_location:\n        location = self._location_key_to_location[location_key]\n        return location.id\n    else:\n        location_index = len(self._location_key_to_location) + 1\n        location = profile_pb2.Location()\n        location.id = location_index\n        self._location_key_to_location[location_key] = location\n        line = location.line.add()\n        line.function_id = self._functions.index_of(called_file_path, called_function_name, called_function_start_line)\n        line.line = line_number\n        return location_index",
    "docstring": "Returns index of the location, adding the location if needed. Args: file_path: (string) Path to file that makes the call. line_number: (integer) Call line number. called_function_name: (string) Function name of the function called at and . called_file_path: (string) Path to file where the called function is defined. called_function_start_line: (integer) Start line number of called function definition in file. Returns: Index of location.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:index_of arg:self arg:file_path arg:line_number arg:called_function_name arg:called_file_path arg:called_function_start_line arguments arg arg arg arg arg arg Assign If Compare Assign Return return:yes Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "template_dirs",
    "source_code": "@cached_property\ndef template_dirs(self):\n    template_dirs = tuple(self.dirs)\n    if self.app_dirs:\n        template_dirs += get_app_template_dirs(self.app_dirname)\n    return template_dirs",
    "docstring": "Return a list of directories to search for templates.",
    "type": "method",
    "file_path": "django\\django\\template\\backends\\base.py",
    "ast_data": "FunctionDef name:template_dirs arg:self arguments arg Assign Call If Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_or_set",
    "source_code": "def get_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):\n    val = self.get(key, self._missing_key, version=version)\n    if val is self._missing_key:\n        if callable(default):\n            default = default()\n        self.add(key, default, timeout=timeout, version=version)\n        return self.get(key, default, version=version)\n    return val",
    "docstring": "Fetch a given key from the cache. If the key does not exist, add the key and set it to the default value. The default value can also be any callable. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. Return the value of the key stored or retrieved.",
    "type": "method",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:get_or_set arg:self arg:key arg:default arg:timeout arg:version arguments arg arg arg arg arg Assign Call If Compare If Call Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_localize_dtensor",
    "source_code": "def _localize_dtensor(module: nn.Module, *_: Any, ignored_params: Optional[set[nn.Parameter]]=None):\n    if ignored_params is None:\n        ignored_params = set()\n    param_list = []\n    for name, param in module.named_parameters():\n        if param in ignored_params:\n            continue\n        t, sharding_info = _flatten_tensor(param)\n        if sharding_info is not None:\n            t = nn.Parameter(t)\n            t._st_info = sharding_info\n            param_list.append((*_get_submodule_n_params(module, name), t))\n    _update_module_param(param_list)",
    "docstring": "Convert DTensor parameters to local tensors",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\parallel\\ddp.py",
    "ast_data": "FunctionDef name:_localize_dtensor arg:module arguments arg arg arg If Compare Assign Call Assign For Call If Compare Assign Call If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "result",
    "source_code": "def result(self):\n    sum_over_row = math_ops.cast(math_ops.reduce_sum(self.total_cm, axis=0), dtype=self._dtype)\n    sum_over_col = math_ops.cast(math_ops.reduce_sum(self.total_cm, axis=1), dtype=self._dtype)\n    true_positives = math_ops.cast(array_ops.tensor_diag_part(self.total_cm), dtype=self._dtype)\n    denominator = sum_over_row + sum_over_col - true_positives\n    num_valid_entries = math_ops.reduce_sum(math_ops.cast(math_ops.not_equal(denominator, 0), dtype=self._dtype))\n    iou = math_ops.div_no_nan(true_positives, denominator)\n    return math_ops.div_no_nan(math_ops.reduce_sum(iou, name='mean_iou'), num_valid_entries)",
    "docstring": "Compute the mean intersection-over-union via the confusion matrix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:result arg:self arguments arg Assign Call Call Assign Call Call Assign Call Call Assign Assign Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_rename",
    "source_code": "def _maybe_rename(self, parent, node, full_name):\n    new_name = self._api_change_spec.symbol_renames.get(full_name, None)\n    if new_name:\n        self.add_log(INFO, node.lineno, node.col_offset, 'Renamed %r to %r' % (full_name, new_name))\n        new_node = full_name_node(new_name, node.ctx)\n        ast.copy_location(new_node, node)\n        pasta.ast_utils.replace_child(parent, node, new_node)\n        return True\n    else:\n        return False",
    "docstring": "Replace node (Attribute or Name) with a node representing full_name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_maybe_rename arg:self arg:parent arg:node arg:full_name arguments arg arg arg arg Assign Call If Call Assign Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "EventSource",
    "source_code": "class EventSource(str, Enum):\n    AGENT = 'AGENT'\n    WORKER = 'WORKER'",
    "docstring": "Known identifiers of the event producers.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\events\\api.py",
    "ast_data": "ClassDef name:EventSource Assign Assign"
  },
  {
    "library": "seaborn",
    "name": "set_yticklabels",
    "source_code": "def set_yticklabels(self, labels=None, **kwargs):\n    for ax in self.axes.flat:\n        curr_ticks = ax.get_yticks()\n        ax.set_yticks(curr_ticks)\n        if labels is None:\n            curr_labels = [label.get_text() for label in ax.get_yticklabels()]\n            ax.set_yticklabels(curr_labels, **kwargs)\n        else:\n            ax.set_yticklabels(labels, **kwargs)\n    return self",
    "docstring": "Set y axis tick labels on the left column of the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:set_yticklabels arg:self arg:labels arguments arg arg arg For Assign Call Call If Compare Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_restore_ops",
    "source_code": "@abc.abstractmethod\ndef run_restore_ops(self, session=None):\n    pass",
    "docstring": "Runs restore ops from the checkpoint. Requires a valid checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "_Multinomial",
    "source_code": "class _Multinomial(Constraint):\n    is_discrete = True\n    event_dim = 1\n\n    def __init__(self, upper_bound):\n        self.upper_bound = upper_bound\n\n    def check(self, x):\n        return (x >= 0).all(dim=-1) & (x.sum(dim=-1) <= self.upper_bound)",
    "docstring": "Constrain to nonnegative integer values summing to at most an upper bound. Note due to limitations of the Multinomial distribution, this currently checks the weaker condition ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Multinomial Assign Assign FunctionDef name:__init__ arg:self arg:upper_bound arguments arg arg Assign FunctionDef name:check arg:self arg:x arguments arg arg Return return:yes Call Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_scatter_update",
    "source_code": "def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):\n    per_var_sparse_delta = self._decompose_indexed_slices(sparse_delta)\n    for i, v in enumerate(self._variables):\n        new_name = None\n        if name is not None:\n            new_name = '{}/part_{}'.format(name, i)\n        v.batch_scatter_update(per_var_sparse_delta[i], name=new_name)\n    return self",
    "docstring": "Implements tf.Variable.batch_scatter_update.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:batch_scatter_update arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Assign Call For Call Assign If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "email_user",
    "source_code": "def email_user(self, subject, message, from_email=None, **kwargs):\n    send_mail(subject, message, from_email, [self.email], **kwargs)",
    "docstring": "Send an email to this user.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:email_user arg:self arg:subject arg:message arg:from_email arguments arg arg arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "_downsample",
    "source_code": "def _downsample(self, how, **kwargs):\n    if isinstance(self.ax, DatetimeIndex):\n        return super()._downsample(how, **kwargs)\n    ax = self.ax\n    if is_subperiod(ax.freq, self.freq):\n        return self._groupby_and_aggregate(how, **kwargs)\n    elif is_superperiod(ax.freq, self.freq):\n        if how == 'ohlc':\n            return self._groupby_and_aggregate(how)\n        return self.asfreq()\n    elif ax.freq == self.freq:\n        return self.asfreq()\n    raise IncompatibleFrequency(f'Frequency {ax.freq} cannot be resampled to {self.freq}, as they are not sub or super periods')",
    "docstring": "Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\resample.py",
    "ast_data": "FunctionDef name:_downsample arg:self arg:how arguments arg arg arg If Call Return return:yes Call Call Assign If Call Return return:yes Call If Call If Compare Return return:yes Call Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "_ctype_ndarray",
    "source_code": "def _ctype_ndarray(element_type, shape):\n    for dim in shape[::-1]:\n        element_type = dim * element_type\n        element_type.__module__ = None\n    return element_type",
    "docstring": "Create an ndarray of the given element type and shape",
    "type": "function",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "FunctionDef name:_ctype_ndarray arg:element_type arg:shape arguments arg arg For Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "GradientEdge",
    "source_code": "class GradientEdge(NamedTuple):\n    node: Node\n    output_nr: int",
    "docstring": "Object representing a given gradient edge within the autograd graph. To get the gradient edge where a given Tensor gradient will be computed, you can do ``.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\graph.py",
    "ast_data": "ClassDef name:GradientEdge"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, sample_weight=None):\n    X = column_or_1d(X)\n    y = column_or_1d(y)\n    X, y = indexable(X, y)\n    self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)\n    return self",
    "docstring": "Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples,) Training data. y : array-like of shape (n_samples,) Training target. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\calibration.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "store",
    "source_code": "def store(self, name: str, index: sympy.Expr, value: T, mode: StoreMode=None) -> None:\n    raise NotImplementedError",
    "docstring": "Store 'value' to the memory location 'name' offset by 'expr'. If specified, 'mode' can require the store to be an atomic addition.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:store arg:self arg:name arg:index arg:value arg:mode arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pygame",
    "name": "get_bold",
    "source_code": "def get_bold(self):\n    return self.wide",
    "docstring": "get_bold() -> bool check if text will be rendered bold",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:get_bold arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "activate",
    "source_code": "def activate(language):\n    if not language:\n        return\n    _active.value = translation(language)",
    "docstring": "Fetch the translation object for a given language and install it as the current translation object for the current thread.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:activate arg:language arguments arg If Return return:no Assign Call"
  },
  {
    "library": "pandas",
    "name": "_parse_latex_table_styles",
    "source_code": "def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | None:\n    for style in table_styles[::-1]:\n        if style['selector'] == selector:\n            return str(style['props'][0][1]).replace('§', ':')\n    return None",
    "docstring": "Return the first 'props' 'value' from ``. Examples -------- >>> table_styles = [ ... {\"selector\": \"foo\", \"props\": [(\"attr\", \"value\")]}, ... {\"selector\": \"bar\", \"props\": [(\"attr\", \"overwritten\")]}, ... {\"selector\": \"bar\", \"props\": [(\"a1\", \"baz\"), (\"a2\", \"ignore\")]}, ... ] >>> _parse_latex_table_styles(table_styles, selector=\"bar\") 'baz' Notes ----- The replacement of \"§\" with \":\" is to avoid the CSS problem where \":\" has structural significance and cannot be used in LaTeX labels, but is often required by them.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_parse_latex_table_styles arg:table_styles arg:selector arguments arg arg For If Compare Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "Optimize",
    "source_code": "@_tf_export('lite.Optimize')\nclass Optimize(enum.Enum):\n    DEFAULT = 'DEFAULT'\n    OPTIMIZE_FOR_SIZE = 'OPTIMIZE_FOR_SIZE'\n    OPTIMIZE_FOR_LATENCY = 'OPTIMIZE_FOR_LATENCY'\n    EXPERIMENTAL_SPARSITY = 'EXPERIMENTAL_SPARSITY'\n\n    def __str__(self):\n        return str(self.value)",
    "docstring": "Enum defining the optimizations to apply when generating a tflite model. DEFAULT The default optimization strategy that enables post-training quantization. The type of post-training quantization that will be used is dependent on the other converter options supplied. Refer to the [documentation]( for further information on the types available and how to use them. OPTIMIZE_FOR_SIZE Deprecated. Does the same as DEFAULT. OPTIMIZE_FOR_LATENCY Deprecated. Does the same as DEFAULT. EXPERIMENTAL_SPARSITY Experimental flag, subject to change. Enable optimization by taking advantage of the sparse model weights trained with pruning. The converter will inspect the sparsity pattern of the model weights and do its best to improve size and latency. The flag can be used alone to optimize float32 models with sparse weights. It can also be used together with the DEFAULT optimization mode to optimize quantized models with sparse weights.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "ClassDef name:Optimize Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_WedgeBbox",
    "source_code": "class _WedgeBbox(mtransforms.Bbox):\n\n    def __init__(self, center, viewLim, originLim, **kwargs):\n        super().__init__([[0, 0], [1, 1]], **kwargs)\n        self._center = center\n        self._viewLim = viewLim\n        self._originLim = originLim\n        self.set_children(viewLim, originLim)\n    __str__ = mtransforms._make_str_method('_center', '_viewLim', '_originLim')\n\n    def get_points(self):\n        if self._invalid:\n            points = self._viewLim.get_points().copy()\n            points[:, 0] *= 180 / np.pi\n            if points[0, 0] > points[1, 0]:\n                points[:, 0] = points[::-1, 0]\n            points[:, 1] -= self._originLim.y0\n            rscale = 0.5 / points[1, 1]\n            points[:, 1] *= rscale\n            width = min(points[1, 1] - points[0, 1], 0.5)\n            wedge = mpatches.Wedge(self._center, points[1, 1], points[0, 0], points[1, 0], width=width)\n            self.update_from_path(wedge.get_path())\n            w, h = self._points[1] - self._points[0]\n            deltah = max(w - h, 0) / 2\n            deltaw = max(h - w, 0) / 2\n            self._points += np.array([[-deltaw, -deltah], [deltaw, deltah]])\n            self._invalid = 0\n        return self._points",
    "docstring": "Transform (theta, r) wedge Bbox into Axes bounding box. Parameters ---------- center : (float, float) Center of the wedge viewLim : Bbox determining the boundaries of the wedge originLim : Bbox determining the origin for the wedge, if different from *viewLim*",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:_WedgeBbox FunctionDef name:__init__ arg:self arg:center arg:viewLim arg:originLim arguments arg arg arg arg arg Call Call Assign Assign Assign Call Assign Call FunctionDef name:get_points arg:self arguments arg If Assign Call Call If Compare Assign Assign Assign Call Assign Call Call Call Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "magma",
    "source_code": "def magma() -> None:\n    set_cmap('magma')",
    "docstring": "Set the colormap to 'magma'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:magma arguments Call"
  },
  {
    "library": "scipy",
    "name": "safecall",
    "source_code": "def safecall(f, name, *args, **kwargs):\n    lwork = kwargs.get('lwork', None)\n    if lwork in (None, -1):\n        kwargs['lwork'] = -1\n        ret = f(*args, **kwargs)\n        kwargs['lwork'] = ret[-2][0].real.astype(np.int_)\n    ret = f(*args, **kwargs)\n    if ret[-1] < 0:\n        raise ValueError(f'illegal value in {-ret[-1]}th argument of internal {name}')\n    return ret[:-2]",
    "docstring": "Call a LAPACK routine, determining lwork automatically and handling error return values",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp_qr.py",
    "ast_data": "FunctionDef name:safecall arg:f arg:name arguments arg arg arg arg Assign Call If Compare Assign Assign Call Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "flatten_dtype",
    "source_code": "def flatten_dtype(ndtype, flatten_base=False):\n    names = ndtype.names\n    if names is None:\n        if flatten_base:\n            return [ndtype.base] * int(np.prod(ndtype.shape))\n        return [ndtype.base]\n    else:\n        types = []\n        for field in names:\n            info = ndtype.fields[field]\n            flat_dt = flatten_dtype(info[0], flatten_base)\n            types.extend(flat_dt)\n        return types",
    "docstring": "Unpack a structured data-type by collapsing nested fields and/or fields with a shape. Note that the field names are lost. Parameters ---------- ndtype : dtype The datatype to collapse flatten_base : bool, optional If True, transform a field with a shape into several fields. Default is False. Examples -------- >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... ('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64'), dtype('int64')]",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:flatten_dtype arg:ndtype arg:flatten_base arguments arg arg Assign If Compare If Return return:yes Call Call Return return:yes Assign For Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_logical_device_configuration",
    "source_code": "def get_logical_device_configuration(self, dev):\n    self._initialize_physical_devices()\n    if dev not in self._physical_devices:\n        raise ValueError('Unrecognized device: %s' % repr(dev))\n    return self._virtual_device_map.get(dev)",
    "docstring": "Get the virtual device configuration for a PhysicalDevice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_logical_device_configuration arg:self arg:dev arguments arg arg Call If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "flatten",
    "source_code": "def flatten(self):\n    flat = {}\n    for d in self.dicts:\n        flat.update(d)\n    return flat",
    "docstring": "Return self.dicts as one dictionary.",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:flatten arg:self arguments arg Assign For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sync",
    "source_code": "@abstractmethod\ndef sync(self) -> Optional[bool]:\n    pass",
    "docstring": "Read or writes the latest state. Returns: A boolean value indicating whether the local state, in case marked as dirty, was successfully synced with other nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:sync arg:self arguments arg"
  },
  {
    "library": "cherrypy",
    "name": "kwargs",
    "source_code": "@property\ndef kwargs(self):\n    return cherrypy.serving.request.kwargs",
    "docstring": "The named kwargs should be accessible from post dispatch hooks.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:kwargs arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_adapted_uniform",
    "source_code": "def _adapted_uniform(shape: Union[Tuple[int, ...], torch.Size], low: Union[float, Tensor], high: Union[float, Tensor], same_on_batch: bool=False) -> Tensor:\n    device, dtype = _extract_device_dtype([low if isinstance(low, Tensor) else None, high if isinstance(high, Tensor) else None])\n    low = as_tensor(low, device=device, dtype=dtype)\n    high = as_tensor(high, device=device, dtype=dtype)\n    dist = Uniform(low, high, validate_args=False)\n    return _adapted_rsampling(shape, dist, same_on_batch)",
    "docstring": "Sample from a uniform sampling function that accepts 'same_on_batch'. If same_on_batch is True, all values generated will be exactly same given a batch_size (shape[0]). By default, same_on_batch is set to False. By default, sampling happens on the default device and dtype. If low/high is a tensor, sampling will happen in the same device/dtype as low/high tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_adapted_uniform arg:shape arg:low arg:high arg:same_on_batch arguments arg arg arg arg Assign Call Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "prepare_run_debug_urls",
    "source_code": "def prepare_run_debug_urls(self, fetches, feed_dict):\n    self._run_counter_lock.acquire()\n    run_dir = os.path.join(self._session_root, 'run_%d_%d' % (int(time.time() * 1000000.0), self._run_counter))\n    self._run_counter += 1\n    self._run_counter_lock.release()\n    gfile.MkDir(run_dir)\n    fetches_event = event_pb2.Event()\n    fetches_event.log_message.message = repr(fetches)\n    fetches_path = os.path.join(run_dir, debug_data.METADATA_FILE_PREFIX + debug_data.FETCHES_INFO_FILE_TAG)\n    with gfile.Open(os.path.join(fetches_path), 'wb') as f:\n        f.write(fetches_event.SerializeToString())\n    feed_keys_event = event_pb2.Event()\n    feed_keys_event.log_message.message = repr(feed_dict.keys()) if feed_dict else repr(feed_dict)\n    feed_keys_path = os.path.join(run_dir, debug_data.METADATA_FILE_PREFIX + debug_data.FEED_KEYS_INFO_FILE_TAG)\n    with gfile.Open(os.path.join(feed_keys_path), 'wb') as f:\n        f.write(feed_keys_event.SerializeToString())\n    return ['file://' + run_dir]",
    "docstring": "Implementation of abstract method in superclass. See doc of for details. This implementation creates a run-specific subdirectory under self._session_root and stores information regarding run and in the subdirectory. Args: fetches: Same as the argument to feed_dict: Same as the argument to Returns: debug_urls: ( or of ) file:// debug URLs to be used in this call.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\dumping_wrapper.py",
    "ast_data": "FunctionDef name:prepare_run_debug_urls arg:self arg:fetches arg:feed_dict arguments arg arg arg Call Assign Call Call Call Call Call Assign Call Assign Call Assign Call With Call Call Call Call Assign Call Assign Call Call Call Assign Call With Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(repo_or_dir, model, *args, source='github', trust_repo=None, force_reload=False, verbose=True, skip_validation=False, **kwargs):\n    source = source.lower()\n    if source not in ('github', 'local'):\n        raise ValueError(f'Unknown source: \"{source}\". Allowed values: \"github\" | \"local\".')\n    if source == 'github':\n        repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, trust_repo, 'load', verbose=verbose, skip_validation=skip_validation)\n    model = _load_local(repo_or_dir, model, *args, **kwargs)\n    return model",
    "docstring": "Load a model from a github repo or a local directory. Note: Loading a model is the typical use case, but this can also be used to for loading other objects such as tokenizers, loss functions, etc. If ``. Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) >>> # from a github repo >>> repo = \"pytorch/vision\" >>> model = torch.hub.load( ... repo, \"resnet50\", weights=\"ResNet50_Weights.IMAGENET1K_V1\" ... ) >>> # from a local directory >>> path = \"/some/local/path/pytorch/vision\" >>> # xdoctest: +SKIP >>> model = torch.hub.load(path, \"resnet50\", weights=\"ResNet50_Weights.DEFAULT\")",
    "type": "function",
    "file_path": "pytorch\\torch\\hub.py",
    "ast_data": "FunctionDef name:load arg:repo_or_dir arg:model arguments arg arg arg arg arg arg arg arg arg Assign Call If Compare Raise Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "convert_affinematrix_to_homography",
    "source_code": "def convert_affinematrix_to_homography(A: Tensor) -> Tensor:\n    if not isinstance(A, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(A)}')\n    if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)):\n        raise ValueError(f'Input matrix must be a Bx2x3 tensor. Got {A.shape}')\n    return _convert_affinematrix_to_homography_impl(A)",
    "docstring": "Convert batch of affine matrices. Args: A: the affine matrix with shape :math:. Returns: the homography matrix with shape of :math:. Examples: >>> A = tensor([[[1., 0., 0.], ... [0., 1., 0.]]]) >>> convert_affinematrix_to_homography(A) tensor([[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:convert_affinematrix_to_homography arg:A arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "values",
    "source_code": "@property\ndef values(self) -> ArrayLike:\n    data = self._data\n    if isinstance(data, np.ndarray):\n        data = data.view()\n        data.flags.writeable = False\n    return data",
    "docstring": "Return an array representing the data in the Index. .. warning:: We recommend using :attr: or :meth:, depending on whether you need a reference to the underlying data or a NumPy array. .. versionchanged:: 3.0.0 The returned array is read-only. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. Examples -------- For :class:: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.values array([1, 2, 3]) For :class:: >>> idx = pd.interval_range(start=0, end=5) >>> idx.values [(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]] Length: 5, dtype: interval[int64, right]",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:values arg:self arguments arg Assign If Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "RendezvousClosedError",
    "source_code": "class RendezvousClosedError(RendezvousError):\n    pass",
    "docstring": "Raised when a rendezvous is closed.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousClosedError"
  },
  {
    "library": "tensorflow",
    "name": "_name_scope",
    "source_code": "def _name_scope(self):\n    if self._keras_style:\n        return super(Layer, self)._name_scope()\n    return self._current_scope.original_name_scope",
    "docstring": "Determines op naming for the Layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\base.py",
    "ast_data": "FunctionDef name:_name_scope arg:self arguments arg If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "Discover",
    "source_code": "class Discover(ABC):\n\n    @classmethod\n    def add_parser_arguments(cls, parser):\n        raise NotImplementedError\n\n    def __init__(self, options) -> None:\n        self._has_run = False\n        self._interpreter = None\n        self._env = options.env\n\n    @abstractmethod\n    def run(self):\n        raise NotImplementedError\n\n    @property\n    def interpreter(self):\n        if self._has_run is False:\n            self._interpreter = self.run()\n            self._has_run = True\n        return self._interpreter",
    "docstring": "Discover and provide the requested Python interpreter.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\discover.py",
    "ast_data": "ClassDef name:Discover FunctionDef name:add_parser_arguments arg:cls arg:parser arguments arg arg Raise FunctionDef name:__init__ arg:self arg:options arguments arg arg Assign Assign Assign FunctionDef name:run arg:self arguments arg Raise FunctionDef name:interpreter arg:self arguments arg If Compare Assign Call Assign Return return:yes"
  },
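The `interpreter` property above memoizes a single `run()` call via the `_has_run` flag. A hypothetical sketch of a concrete subclass (the class name, option, and return value are illustrative, not part of virtualenv's API):

```python
from virtualenv.discovery.discover import Discover

class FixedInterpreterDiscover(Discover):  # hypothetical example subclass
    @classmethod
    def add_parser_arguments(cls, parser):
        parser.add_argument("--fixed-python", help="illustrative option")

    def run(self):
        # Executed at most once; the base class's `interpreter` property
        # caches the result and returns it on every later access.
        return "/usr/bin/python3"  # stand-in for a discovered interpreter
```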
  {
    "library": "scipy",
    "name": "poisson2d",
    "source_code": "def poisson2d(N, dtype='d', format=None):\n    if N == 1:\n        diags = asarray([[4]], dtype=dtype)\n        return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)\n    offsets = array([0, -N, N, -1, 1])\n    diags = empty((5, N ** 2), dtype=dtype)\n    diags[0] = 4\n    diags[1:] = -1\n    diags[3, N - 1::N] = 0\n    diags[4, N::N] = 0\n    return dia_matrix((diags, offsets), shape=(N ** 2, N ** 2)).asformat(format)",
    "docstring": "Return a sparse matrix for the 2D Poisson problem with standard 5-point finite difference stencil on a square N-by-N grid.",
    "type": "function",
    "file_path": "scipy\\benchmarks\\benchmarks\\sparse.py",
    "ast_data": "FunctionDef name:poisson2d arg:N arg:dtype arg:format arguments arg arg arg If Compare Assign Call Return return:yes Call Call Assign Call Assign Call Assign Assign Assign Assign Return return:yes Call Call"
  },
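To see the stencil structure, the N=2 case can be materialized to a dense array; the zeroed entries in the ±1 bands are where a grid row ends, so horizontal neighbours don't wrap across rows. A small sketch replicating the function's diagonal setup:

```python
import numpy as np
from scipy.sparse import dia_matrix

N = 2
offsets = np.array([0, -N, N, -1, 1])
diags = np.empty((5, N ** 2))
diags[0] = 4            # main diagonal: 4
diags[1:] = -1          # vertical and horizontal neighbours: -1
diags[3, N - 1::N] = 0  # break the sub-diagonal at the end of each grid row
diags[4, N::N] = 0      # break the super-diagonal at the start of each grid row
A = dia_matrix((diags, offsets), shape=(N ** 2, N ** 2)).toarray()
print(A)
# [[ 4. -1. -1.  0.]
#  [-1.  4.  0. -1.]
#  [-1.  0.  4. -1.]
#  [ 0. -1. -1.  4.]]
```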
  {
    "library": "django",
    "name": "get_formats",
    "source_code": "def get_formats():\n    FORMAT_SETTINGS = ('DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT', 'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS')\n    return {attr: get_format(attr) for attr in FORMAT_SETTINGS}",
    "docstring": "Return all formats strings required for i18n to work.",
    "type": "function",
    "file_path": "django\\django\\views\\i18n.py",
    "ast_data": "FunctionDef name:get_formats arguments Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "rotate_from_to",
    "source_code": "@classmethod\ndef rotate_from_to(cls, r1, r2):\n    k = np.cross(r1, r2)\n    nk = np.linalg.norm(k)\n    th = np.arctan2(nk, np.dot(r1, r2))\n    th /= 2\n    if nk == 0:\n        if np.dot(r1, r2) < 0:\n            warnings.warn('Rotation defined by anti-parallel vectors is ambiguous')\n            k = np.zeros(3)\n            k[np.argmin(r1 * r1)] = 1\n            k = np.cross(r1, k)\n            k = k / np.linalg.norm(k)\n            q = cls(0, k)\n        else:\n            q = cls(1, [0, 0, 0])\n    else:\n        q = cls(np.cos(th), k * np.sin(th) / nk)\n    return q",
    "docstring": "The quaternion for the shortest rotation from vector r1 to vector r2 i.e., q = sqrt(r2*r1'), normalized. If r1 and r2 are antiparallel, then the result is ambiguous; a normal vector will be returned, and a warning will be issued.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:rotate_from_to arg:cls arg:r1 arg:r2 arguments arg arg arg Assign Call Assign Call Assign Call Call If Compare If Compare Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
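For the generic (non-antiparallel) case this is the standard half-angle quaternion construction. A numpy sketch, independent of matplotlib's private class, rotating the x-axis onto the y-axis:

```python
import numpy as np

# axis k = r1 x r2, half-angle th from atan2, quaternion (cos th, k sin th / |k|)
r1, r2 = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
k = np.cross(r1, r2)                     # rotation axis: +z
nk = np.linalg.norm(k)
th = np.arctan2(nk, np.dot(r1, r2)) / 2  # half of the 90-degree angle
scalar, vector = np.cos(th), k * np.sin(th) / nk
print(scalar, vector)  # ~0.7071, [0, 0, ~0.7071]: 90-degree rotation about z
```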
  {
    "library": "pandas",
    "name": "_check_ne_builtin_clash",
    "source_code": "def _check_ne_builtin_clash(expr: Expr) -> None:\n    names = expr.names\n    overlap = names & _ne_builtins\n    if overlap:\n        s = ', '.join([repr(x) for x in overlap])\n        raise NumExprClobberingError(f'Variables in expression \"{expr}\" overlap with builtins: ({s})')",
    "docstring": "Attempt to prevent foot-shooting in a helpful way. Parameters ---------- expr : Expr Terms can contain",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\engines.py",
    "ast_data": "FunctionDef name:_check_ne_builtin_clash arg:expr arguments arg Assign Assign If Assign Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "empty_cache",
    "source_code": "def empty_cache() -> None:\n    return torch._C._mtia_emptyCache()",
    "docstring": "Empty the MTIA device cache.",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:empty_cache arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "initial_forms",
    "source_code": "@property\ndef initial_forms(self):\n    return self.forms[:self.initial_form_count()]",
    "docstring": "Return a list of all the initial forms in this formset.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:initial_forms arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X=None, y=None, groups=None):\n    return len(self.unique_folds)",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_FakeQuantWithMinMaxVarsPerChannelGradient",
    "source_code": "@ops.RegisterGradient('FakeQuantWithMinMaxVarsPerChannel')\ndef _FakeQuantWithMinMaxVarsPerChannelGradient(op: ops.Operation, grad):\n    return fake_quant_with_min_max_vars_per_channel_gradient(grad, op.inputs[0], op.inputs[1], op.inputs[2], num_bits=op.get_attr('num_bits'), narrow_range=op.get_attr('narrow_range'))",
    "docstring": "Gradient for FakeQuantWithMinMaxVarsPerChannel op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_FakeQuantWithMinMaxVarsPerChannelGradient arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_convert_datelike_array",
    "source_code": "def _maybe_convert_datelike_array(self):\n    pa_type = self._pa_array.type\n    if pa.types.is_timestamp(pa_type):\n        return self._to_datetimearray()\n    elif pa.types.is_duration(pa_type):\n        return self._to_timedeltaarray()\n    return self",
    "docstring": "Maybe convert to a datelike array.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_maybe_convert_datelike_array arg:self arguments arg Assign If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ResourceTracker",
    "source_code": "class ResourceTracker:\n    __slots__ = ['_resources']\n\n    def __init__(self):\n        self._resources = []\n\n    @property\n    def resources(self):\n        return self._resources\n\n    def add_resource(self, resource):\n        self._resources.append(resource)",
    "docstring": "An object that tracks a list of resources.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "ClassDef name:ResourceTracker Assign FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:resources arg:self arguments arg Return return:yes FunctionDef name:add_resource arg:self arg:resource arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_valid_dtypes",
    "source_code": "def _assert_valid_dtypes(self, tensors):\n    valid_dtypes = self._valid_dtypes()\n    for t in tensors:\n        dtype = t.dtype.base_dtype\n        if dtype not in valid_dtypes:\n            raise ValueError('Invalid type %r for %s, expected: %s.' % (dtype, t.name, [v for v in valid_dtypes]))",
    "docstring": "Asserts tensors are all valid types (see ). Args: tensors: Tensors to check. Raises: ValueError: If any tensor is not a valid type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_assert_valid_dtypes arg:self arg:tensors arguments arg arg Assign Call For Assign If Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "from_bounds",
    "source_code": "@staticmethod\ndef from_bounds(x0, y0, width, height):\n    return Bbox.from_extents(x0, y0, x0 + width, y0 + height)",
    "docstring": "Create a new from *x0*, *y0*, *width* and *height*. *width* and *height* may be negative.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:from_bounds arg:x0 arg:y0 arg:width arg:height arguments arg arg arg arg Return return:yes Call"
  },
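Usage is a thin wrapper over `Bbox.from_extents`, converting width/height into the opposite corner:

```python
from matplotlib.transforms import Bbox

bb = Bbox.from_bounds(1, 2, 3, 4)  # x0=1, y0=2, width=3, height=4
print(bb.extents)                  # [1. 2. 4. 6.] == (x0, y0, x0+w, y0+h)
```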
  {
    "library": "tensorflow",
    "name": "to_list",
    "source_code": "def to_list(self):\n    if isinstance(self._values, RaggedTensorValue):\n        values_as_list = self._values.to_list()\n    else:\n        values_as_list = self._values.tolist()\n    return [values_as_list[self._row_splits[i]:self._row_splits[i + 1]] for i in range(len(self._row_splits) - 1)]",
    "docstring": "Returns this ragged tensor value as a nested Python list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py",
    "ast_data": "FunctionDef name:to_list arg:self arguments arg If Call Assign Call Assign Call Return return:yes Call Call"
  },
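The nested-list conversion is just slicing the flat values by consecutive `row_splits` boundaries; a plain-Python sketch of that indexing:

```python
values = [1, 2, 3, 4, 5, 6]
row_splits = [0, 2, 2, 6]  # row i spans values[row_splits[i]:row_splits[i+1]]
nested = [values[row_splits[i]:row_splits[i + 1]]
          for i in range(len(row_splits) - 1)]
print(nested)  # [[1, 2], [], [3, 4, 5, 6]]
```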
  {
    "library": "sphinx",
    "name": "merge_other",
    "source_code": "def merge_other(self, app: Sphinx, env: BuildEnvironment, docnames: Set[str], other: BuildEnvironment) -> None:\n    raise NotImplementedError",
    "docstring": "Merge in specified data regarding docnames from a different object which coming from a subprocess in parallel builds. .. seealso:: :event:",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\__init__.py",
    "ast_data": "FunctionDef name:merge_other arg:self arg:app arg:env arg:docnames arg:other arguments arg arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "load_json_file",
    "source_code": "def load_json_file(file_path: Path) -> Any:\n    with open(file_path) as f:\n        return json.load(f)",
    "docstring": "Returns the deserialized json object",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\file_io_utils.py",
    "ast_data": "FunctionDef name:load_json_file arg:file_path arguments arg With Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_datacopied",
    "source_code": "def _datacopied(arr, original):\n    if arr is original:\n        return False\n    if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n        return False\n    return arr.base is None",
    "docstring": "Strict check for not sharing any data with , under the assumption that arr = asarray(original)",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py",
    "ast_data": "FunctionDef name:_datacopied arg:arr arg:original arguments arg arg If Compare Return return:yes If BoolOp Call Call Return return:yes Return return:yes Compare"
  },
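The check relies on `ndarray.base`: a no-copy `asarray` either returns the original object itself or a view whose `base` is set. A quick demonstration with the same logic inlined:

```python
import numpy as np

def datacopied(arr, original):
    # Mirrors scipy's helper: True only when asarray(original) made a copy.
    if arr is original:
        return False
    if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):
        return False
    return arr.base is None

a = np.arange(3)
print(datacopied(np.asarray(a), a))                  # False: no copy made
print(datacopied(np.asarray([0, 1, 2]), [0, 1, 2]))  # True: list was copied
```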
  {
    "library": "tensorflow",
    "name": "wrap",
    "source_code": "def wrap(tensor, is_stacked=True, is_sparse_stacked=False):\n    assert isinstance(is_stacked, bool)\n    assert isinstance(is_sparse_stacked, bool)\n    assert isinstance(tensor, tensor_lib.Tensor), type(tensor)\n    assert not is_sparse_stacked or is_stacked, 'If the wrapped tensor is stacked via a sparse conversion, it must also be stacked.'\n    return WrappedTensor(tensor, is_stacked, is_sparse_stacked)",
    "docstring": "Helper to create a WrappedTensor object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:wrap arg:tensor arg:is_stacked arg:is_sparse_stacked arguments arg arg arg Call Call Call Call BoolOp Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "locator",
    "source_code": "@property\ndef locator(self):\n    return self.long_axis.get_major_locator()",
    "docstring": "Major tick for the colorbar.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:locator arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "array_function_errmsg_formatter",
    "source_code": "def array_function_errmsg_formatter(public_api, types):\n    func_name = f'{public_api.__module__}.{public_api.__name__}'\n    return f\"no implementation found for '{func_name}' on types that implement __array_function__: {list(types)}\"",
    "docstring": "Format the error message for when __array_ufunc__ gives up.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:array_function_errmsg_formatter arg:public_api arg:types arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_RoundRobinStrategy",
    "source_code": "class _RoundRobinStrategy:\n\n    def __init__(self, num_tasks):\n        self._num_tasks = num_tasks\n        self._next_task = 0\n\n    def __call__(self, unused_op):\n        task = self._next_task\n        self._next_task = (self._next_task + 1) % self._num_tasks\n        return task",
    "docstring": "Returns the next ps task index for placement in round-robin order. This class is not to be used directly by users. See instead below.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\device_setter.py",
    "ast_data": "ClassDef name:_RoundRobinStrategy FunctionDef name:__init__ arg:self arg:num_tasks arguments arg arg Assign Assign FunctionDef name:__call__ arg:self arg:unused_op arguments arg arg Assign Assign Return return:yes"
  },
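Since the op argument is ignored, the strategy is just a cycling counter; a standalone sketch of the same behavior:

```python
from itertools import count

class RoundRobin:
    # Standalone re-implementation of the cycling placement strategy.
    def __init__(self, num_tasks):
        self._num_tasks = num_tasks
        self._counter = count()

    def __call__(self, unused_op):
        return next(self._counter) % self._num_tasks

rr = RoundRobin(3)
print([rr(None) for _ in range(5)])  # [0, 1, 2, 0, 1]
```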
  {
    "library": "pytorch",
    "name": "InferStride",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass InferStride:\n    dim: int",
    "docstring": "Denotes the quantity stride[dim] * size[dim], which is what the stride would be for the next physical dimension that results in a contiguous layout. For example, given size = [2, 3], stride = [3, 1], we can replace this with stride = [InferStride(1), 1], because InferStride(1) = stride[1] * size[1] = 1 * 3 = 3 Indirecting the representation in this way is important for the join operation on strides as if we join [2, 3][3, 1] and [2, 4][4, 1], we don't want [2, None][None, 1] which would get eventually symbolized into [2, s0][s1, 1] (notice that the relationship between s0 and s1 is broken). If we instead rewrite the expressions as InferStride so we have [2, 3][InferStride(1), 1] and [2, 4][InferStride(1), 1] we now join to [2, None][InferStride(1), 1] will result in [2, s0][s0, 1], as desired.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\pgo.py",
    "ast_data": "ClassDef name:InferStride Call"
  },
  {
    "library": "pytorch",
    "name": "lift_lowering_attrs_to_nodes",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef lift_lowering_attrs_to_nodes(fx_module: GraphModule) -> None:\n    submodules = dict(fx_module.named_modules())\n    for node in fx_module.graph.nodes:\n        if node.op == 'call_module':\n            if isinstance(submodules[node.target], GraphModule):\n                lift_lowering_attrs_to_nodes(submodules[node.target])\n            else:\n                node.attrs_for_lowering = extract_attrs_for_lowering(submodules[node.target])",
    "docstring": "Recursively traverse all nodes and fetch the module's attributes if the node is a leaf module.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\param_fetch.py",
    "ast_data": "FunctionDef name:lift_lowering_attrs_to_nodes arg:fx_module arguments arg Assign Call Call For If Compare If Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "default_storage",
    "source_code": "def default_storage(request):\n    return import_string(settings.MESSAGE_STORAGE)(request)",
    "docstring": "Callable with the same interface as the storage classes. This isn't just default_storage = import_string(settings.MESSAGE_STORAGE) to avoid accessing the settings at the module level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\storage\\__init__.py",
    "ast_data": "FunctionDef name:default_storage arg:request arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecated('2016-11-30', 'Please switch to tf.summary.FileWriter. The interface and behavior is the same; this is just a rename.')\ndef __init__(self, logdir, graph=None, max_queue=10, flush_secs=120, graph_def=None):\n    super(SummaryWriter, self).__init__(logdir, graph, max_queue, flush_secs, graph_def)",
    "docstring": "Creates a and an event file. This class is deprecated, and should be replaced with tf.summary.FileWriter. On construction the summary writer creates a new event file in . This event file will contain protocol buffers constructed when you call one of the following functions: , , , or . If you pass a to the constructor it is added to the event file. (This is equivalent to calling later). TensorBoard will pick the graph from the file and display it graphically so you can interactively explore the graph you built. You will usually pass the graph from the session in which you launched it: The other arguments to the constructor control the asynchronous writes to the event file: * : How often, in seconds, to flush the added summaries and events to disk. * : Maximum number of summaries or events pending to be written to disk before one of the 'add' calls block. Args: logdir: A string. Directory where event file will be written. graph: A object, such as . max_queue: Integer. Size of the queue for pending events and summaries. flush_secs: Number. How often, in seconds, to flush the pending events and summaries to disk. graph_def: DEPRECATED: Use the argument instead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\summary_io.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:logdir arg:graph arg:max_queue arg:flush_secs arg:graph_def arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_save_as_bf16",
    "source_code": "@tf_export('experimental.dtensor.enable_save_as_bf16', v1=[])\ndef enable_save_as_bf16(variables: List[tf_variables.Variable]):\n    for v in variables:\n        if isinstance(v, d_variable.DVariable):\n            v.save_as_bf16 = True",
    "docstring": "Allows float32 DVariables to be checkpointed and restored as bfloat16. The method only affects the DVariable part inside the model and leaves non-DTensor Variables/Tensors untouched. Args: variables: A list of tf.Variable to be enabled with bfloat16 save/restore. Only has effect on DTensor Variables as they go through d_variables with DTensor Specific logis.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\save_restore.py",
    "ast_data": "FunctionDef name:enable_save_as_bf16 arg:variables arguments arg For If Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "cc_test_flags",
    "source_code": "@_Cache.me\ndef cc_test_flags(self, flags):\n    assert isinstance(flags, list)\n    self.dist_log('testing flags', flags)\n    test_path = os.path.join(self.conf_check_path, 'test_flags.c')\n    test = self.dist_test(test_path, flags)\n    if not test:\n        self.dist_log('testing failed', stderr=True)\n    return test",
    "docstring": "Returns True if the compiler supports 'flags'.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cc_test_flags arg:self arg:flags arguments arg arg Call Call Assign Call Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_mpi_available",
    "source_code": "def is_mpi_available() -> bool:\n    return _MPI_AVAILABLE",
    "docstring": "Check if the MPI backend is available.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:is_mpi_available arguments Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_data_path",
    "source_code": "def _get_data_path(*args):\n    return Path(matplotlib.get_data_path(), *args)",
    "docstring": "Return the to a resource file provided by Matplotlib. `` specify a path relative to the base data path.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_get_data_path arguments arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "check_skipped_app_config",
    "source_code": "def check_skipped_app_config(self):\n    for sn, app in cherrypy.tree.apps.items():\n        if not isinstance(app, cherrypy.Application):\n            continue\n        if not app.config:\n            msg = 'The Application mounted at %r has an empty config.' % sn\n            if self.global_config_contained_paths:\n                msg += ' It looks like the config you passed to cherrypy.config.update() contains application-specific sections. You must explicitly pass application config via cherrypy.tree.mount(..., config=app_config)'\n            warnings.warn(msg)\n            return",
    "docstring": "Check for mounted Applications that have no config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpchecker.py",
    "ast_data": "FunctionDef name:check_skipped_app_config arg:self arguments arg For Call If Call If Assign If Call Return return:no"
  },
  {
    "library": "scipy",
    "name": "pmf",
    "source_code": "def pmf(self, k, n, m):\n    self._recalc(n, m)\n    ind = np.floor(k - self.astart).astype(int)\n    return self.freqs[ind] / self.total",
    "docstring": "Probability mass function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_morestats.py",
    "ast_data": "FunctionDef name:pmf arg:self arg:k arg:n arg:m arguments arg arg arg arg Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "UnsupportedLanguageElementError",
    "source_code": "class UnsupportedLanguageElementError(PyCTError, NotImplementedError):\n    pass",
    "docstring": "Raised for code patterns that AutoGraph does not support.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\errors.py",
    "ast_data": "ClassDef name:UnsupportedLanguageElementError"
  },
  {
    "library": "pytorch",
    "name": "_batch_trace_XXT",
    "source_code": "def _batch_trace_XXT(bmat):\n    n = bmat.size(-1)\n    m = bmat.size(-2)\n    flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1)\n    return flat_trace.reshape(bmat.shape[:-2])",
    "docstring": "Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kl.py",
    "ast_data": "FunctionDef name:_batch_trace_XXT arg:bmat arguments arg Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
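The identity being used is trace(XXᵀ) = Σᵢⱼ Xᵢⱼ², i.e. the squared Frobenius norm, which avoids materializing XXᵀ. A quick check against the naive computation:

```python
import torch

def batch_trace_xxt(bmat: torch.Tensor) -> torch.Tensor:
    # trace(X @ X.T) equals the sum of squared entries of X, batched.
    m, n = bmat.shape[-2], bmat.shape[-1]
    flat = bmat.reshape(-1, m * n).pow(2).sum(-1)
    return flat.reshape(bmat.shape[:-2])

x = torch.randn(4, 2, 3)
naive = torch.stack([torch.trace(mat @ mat.T) for mat in x])
print(torch.allclose(batch_trace_xxt(x), naive))  # True
```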
  {
    "library": "cherrypy",
    "name": "_setup",
    "source_code": "def _setup(self):\n    hooks = cherrypy.serving.request.hooks\n    conf = self._merged_args()\n    p = conf.pop('priority', None)\n    if p is None:\n        p = getattr(self.callable, 'priority', self._priority)\n    hooks.attach(self._point, self.callable, priority=p, **conf)\n    locking = conf.pop('locking', 'implicit')\n    if locking == 'implicit':\n        hooks.attach('before_handler', self._lock_session)\n    elif locking == 'early':\n        hooks.attach('before_request_body', self._lock_session, priority=60)\n    else:\n        pass\n    hooks.attach('before_finalize', _sessions.save)\n    hooks.attach('on_end_request', _sessions.close)",
    "docstring": "Wire this tool into ``. The standard CherryPy request object will automatically call this method when the tool is \"turned on\" in config.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:_setup arg:self arguments arg Assign Assign Call Assign Call If Compare Assign Call Call Assign Call If Compare Call If Compare Call Call Call"
  },
  {
    "library": "django",
    "name": "default_key_func",
    "source_code": "def default_key_func(key, key_prefix, version):\n    return '%s:%s:%s' % (key_prefix, version, key)",
    "docstring": "Default function to generate keys. Construct the key used by all other methods. By default, prepend the . KEY_FUNCTION can be used to specify an alternate function with custom key making behavior.",
    "type": "function",
    "file_path": "django\\django\\core\\cache\\backends\\base.py",
    "ast_data": "FunctionDef name:default_key_func arg:key arg:key_prefix arg:version arguments arg arg arg Return return:yes"
  },
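The resulting cache key is simply `prefix:version:key`:

```python
def default_key_func(key, key_prefix, version):
    return '%s:%s:%s' % (key_prefix, version, key)

print(default_key_func('user:42', 'myapp', 1))  # 'myapp:1:user:42'
```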
  {
    "library": "matplotlib",
    "name": "ToolQuitAll",
    "source_code": "class ToolQuitAll(ToolBase):\n    description = 'Quit all figures'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.quit_all'])\n\n    def trigger(self, sender, event, data=None):\n        Gcf.destroy_all()",
    "docstring": "Tool to call the figure manager destroy method.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolQuitAll Assign Assign Call arguments arg FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_aot_graph_name",
    "source_code": "def get_aot_graph_name() -> str:\n    global model_name, graph_being_compiled, nth_graph\n    return f'{model_name}__{'_'.join(graph_being_compiled)}_{nth_graph}'",
    "docstring": "Returns the name of the graph being compiled.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\logging_utils.py",
    "ast_data": "FunctionDef name:get_aot_graph_name arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "probs",
    "source_code": "@property\ndef probs(self):\n    return self._probs",
    "docstring": "Probability of a outcome (vs ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bernoulli.py",
    "ast_data": "FunctionDef name:probs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_dump_sizes_bytes",
    "source_code": "def get_dump_sizes_bytes(self, node_name, output_slot, debug_op, device_name=None):\n    device_name = self._infer_device_name(device_name, node_name)\n    watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)\n    if watch_key not in self._watch_key_to_datum[device_name]:\n        raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump of device %s' % (watch_key, device_name))\n    return self._watch_key_to_dump_size_bytes[device_name][watch_key]",
    "docstring": "Get the sizes of the dump files for a debug-dumped tensor. Unit of the file size: byte. Args: node_name: () name of the node that the tensor is produced by. output_slot: () output slot index of tensor. debug_op: () name of the debug op. device_name: () name of the device. If there is only one device or if the specified debug_watch_key exists on only one device, this argument is optional. Returns: ( of ): list of dump file sizes in bytes. Raises: WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not exist in the debug dump data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:get_dump_sizes_bytes arg:self arg:node_name arg:output_slot arg:debug_op arg:device_name arguments arg arg arg arg arg Assign Call Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "AxesWidget",
    "source_code": "class AxesWidget(Widget):\n\n    def __init__(self, ax):\n        self.ax = ax\n        self._cids = []\n    canvas = property(lambda self: self.ax.get_figure(root=True).canvas)\n\n    def connect_event(self, event, callback):\n        cid = self.canvas.mpl_connect(event, callback)\n        self._cids.append(cid)\n\n    def disconnect_events(self):\n        for c in self._cids:\n            self.canvas.mpl_disconnect(c)\n\n    def _get_data_coords(self, event):\n        return (event.xdata, event.ydata) if event.inaxes is self.ax else self.ax.transData.inverted().transform((event.x, event.y))",
    "docstring": "Widget connected to a single . To guarantee that the widget remains responsive and not garbage-collected, a reference to the object should be maintained by the user. This is necessary because the callback registry maintains only weak-refs to the functions, which are member functions of the widget. If there are no references to the widget object it may be garbage collected which will disconnect the callbacks. Attributes ---------- ax : The parent Axes for the widget. canvas : The parent figure canvas for the widget. active : bool If False, the widget does not respond to events.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "ClassDef name:AxesWidget FunctionDef name:__init__ arg:self arg:ax arguments arg arg Assign Assign Assign Call arguments arg Call FunctionDef name:connect_event arg:self arg:event arg:callback arguments arg arg arg Assign Call Call FunctionDef name:disconnect_events arg:self arguments arg For Call FunctionDef name:_get_data_coords arg:self arg:event arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "django",
    "name": "total_unique_constraints",
    "source_code": "@cached_property\ndef total_unique_constraints(self):\n    return [constraint for constraint in self.constraints if isinstance(constraint, UniqueConstraint) and constraint.condition is None and (not constraint.contains_expressions)]",
    "docstring": "Return a list of total unique constraints. Useful for determining set of fields guaranteed to be unique for all rows.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\options.py",
    "ast_data": "FunctionDef name:total_unique_constraints arg:self arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(self, name: str, index: sympy.Expr) -> T:\n    raise NotImplementedError",
    "docstring": "Load from the memory location 'name', offset by some indexing expression 'index'.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:load arg:self arg:name arg:index arguments arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "shape_v1",
    "source_code": "@dispatch.dispatch_for_api(array_ops.shape)\ndef shape_v1(input: StructuredTensor, name=None, out_type=dtypes.int32) -> dynamic_ragged_shape.DynamicRaggedShape:\n    del name\n    return input._ragged_shape.with_dtype(out_type)",
    "docstring": "Returns a DynamicRaggedShape containing the shape of the input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:shape_v1 arg:input arg:name arg:out_type arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "allreduce_hook",
    "source_code": "def allreduce_hook(process_group: dist.ProcessGroup, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:\n    return _allreduce_fut(process_group, bucket.buffer())",
    "docstring": "Call `` callback takes the mean and returns the result. If user registers this DDP communication hook, DDP results is expected to be same as the case where no hook was registered. Hence, this won't change behavior of DDP and user can use this as a reference or modify this hook to log useful information or any other purposes while unaffecting DDP behavior. Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(process_group, allreduce_hook)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py",
    "ast_data": "FunctionDef name:allreduce_hook arg:process_group arg:bucket arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self):\n    super(TFLiteConverterBaseV2, self).__init__()\n    self.inference_input_type = _dtypes.float32\n    self.inference_output_type = _dtypes.float32\n    self._metadata.environment.apiVersion = 2",
    "docstring": "Constructor for TFLiteConverter.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_check_input_partition_dims",
    "source_code": "def _check_input_partition_dims(self, tensor, dims):\n    if dims is None:\n        return\n    dims = np.array(dims)\n    if (dims < 1).any():\n        raise ValueError('All input partition dims must be >= 1.')\n    if dims.prod() == 1:\n        return\n    if dims.prod() != self._device_assignment.num_cores_per_replica:\n        raise ValueError('The product of each input partition dim should equal to num_cores_per_replica. (dim = {}, num_cores_per_replica = {})'.format(dims, self._device_assignment.num_cores_per_replica))\n    if dims.shape[0] != tensor.shape.ndims:\n        raise ValueError('Input partition dims must have the same number of dimensions as the `Tensor` to be partitioned. (tensor shape = {}, input partition dims = {}).'.format(tensor.shape.as_list(), dims))\n    tensor.shape.assert_is_fully_defined()",
    "docstring": "Checks that input partition dims are valid for the . Args: tensor: Input tensor for partitioning. dims: A list of integer describes how to partition the input tensor. Raises: ValueError: If the tensor can't be partitioned by dims or the num_cores_per_replica doesn't match the number of partitions(dims.prod()).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:_check_input_partition_dims arg:self arg:tensor arg:dims arguments arg arg arg If Compare Return return:no Assign Call If Call Compare Raise Call If Compare Call Return return:no If Compare Call Raise Call Call If Compare Raise Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "stop",
    "source_code": "def stop(self) -> Deferred[None]:\n\n    @deferred_f_from_coro_f\n    async def _finish_stopping_engine(_: Any) -> None:\n        await self.signals.send_catch_log_async(signal=signals.engine_stopped)\n        self._closewait.callback(None)\n    if not self.running:\n        raise RuntimeError('Engine not running')\n    self.running = False\n    dfd = self.close_spider(self.spider, reason='shutdown') if self.spider is not None else succeed(None)\n    return dfd.addBoth(_finish_stopping_engine)",
    "docstring": "Gracefully stop the execution engine",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\engine.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg AsyncFunctionDef name:_finish_stopping_engine arg:_ arguments arg Call Call If Raise Call Assign Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "save_figure",
    "source_code": "def save_figure(self, *args):\n    raise NotImplementedError",
    "docstring": "Save the current figure. Backend implementations may choose to return the absolute path of the saved file, if any, as a string. If no file is created then is returned. If the backend does not implement this functionality then is returned. Returns ------- str or or The filepath of the saved figure. Returns if figure is not saved. Returns when the backend does not provide the information.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:save_figure arg:self arguments arg arg Raise"
  },
  {
    "library": "scipy",
    "name": "validate_first_step",
    "source_code": "def validate_first_step(first_step, t0, t_bound):\n    if first_step <= 0:\n        raise ValueError('`first_step` must be positive.')\n    if first_step > np.abs(t_bound - t0):\n        raise ValueError('`first_step` exceeds bounds.')\n    return first_step",
    "docstring": "Assert that first_step is valid and return it.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\common.py",
    "ast_data": "FunctionDef name:validate_first_step arg:first_step arg:t0 arg:t_bound arguments arg arg arg If Compare Raise Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "export_outputs_for_mode",
    "source_code": "def export_outputs_for_mode(mode, serving_export_outputs=None, predictions=None, loss=None, metrics=None):\n    if mode not in SIGNATURE_KEY_MAP:\n        raise ValueError('Export output type not found for mode: {}. Expected one of: {}.\\n'.format(mode, SIGNATURE_KEY_MAP.keys()))\n    signature_key = SIGNATURE_KEY_MAP[mode]\n    if mode_keys.is_predict(mode):\n        return get_export_outputs(serving_export_outputs, predictions)\n    elif mode_keys.is_train(mode):\n        return {signature_key: export_output_lib.TrainOutput(loss=loss, predictions=predictions, metrics=metrics)}\n    else:\n        return {signature_key: export_output_lib.EvalOutput(loss=loss, predictions=predictions, metrics=metrics)}",
    "docstring": "Util function for constructing a dict given a mode. The returned dict can be directly passed to helper function as the argument, used for generating a SignatureDef map. Args: mode: A specifying the mode. serving_export_outputs: Describes the output signatures to be exported to and used during serving. Should be a dict or None. predictions: A dict of Tensors or single Tensor representing model predictions. This argument is only used if serving_export_outputs is not set. loss: A dict of Tensors or single Tensor representing calculated loss. metrics: A dict of (metric_value, update_op) tuples, or a single tuple. metric_value must be a Tensor, and update_op must be a Tensor or Op Returns: Dictionary mapping the key to an object. The key is the expected SignatureDef key for the mode. Raises: ValueError: if an appropriate ExportOutput cannot be found for the mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py",
    "ast_data": "FunctionDef name:export_outputs_for_mode arg:mode arg:serving_export_outputs arg:predictions arg:loss arg:metrics arguments arg arg arg arg arg If Compare Raise Call Call Call Assign If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "image_data_format",
    "source_code": "@dispatch.add_dispatch_support\ndef image_data_format():\n    return _IMAGE_DATA_FORMAT",
    "docstring": "Returns the default image data format convention. Returns: A string, either or Example: >>> tf.keras.backend.image_data_format() 'channels_last'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py",
    "ast_data": "FunctionDef name:image_data_format arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_all_weights",
    "source_code": "def _create_all_weights(self, var_list):\n    _ = self.iterations\n    self._create_hypers()\n    self._create_slots(var_list)",
    "docstring": "Creates all weights, including iterations, hyperparameters and slot vars. This will add newly created variables to . New variables are only created when this method is called the first time, or when called with different variables in the var_list. Args: var_list: list or tuple of objects that will be minimized using this optimizer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_create_all_weights arg:self arg:var_list arguments arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "ROCmConvConfig",
    "source_code": "@dataclasses.dataclass\nclass ROCmConvConfig(ConvConfig):\n    matrix_instr_nonkdim: int = 16\n    waves_per_eu: int = 0\n    kpack: int = 2",
    "docstring": "ROCm subclass for Conv, with AMD backend specific tuneable kernargs",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py",
    "ast_data": "ClassDef name:ROCmConvConfig"
  },
  {
    "library": "tensorflow",
    "name": "_create_per_worker_variable",
    "source_code": "def _create_per_worker_variable(self, next_creator, **kwargs):\n    return ps_values.PerWorkerVariable(self._container_strategy(), next_creator, **kwargs)",
    "docstring": "Create an unsynced, unaggregated variable on each worker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\parameter_server_strategy_v2.py",
    "ast_data": "FunctionDef name:_create_per_worker_variable arg:self arg:next_creator arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_IntegerGreaterThan",
    "source_code": "class _IntegerGreaterThan(Constraint):\n    is_discrete = True\n\n    def __init__(self, lower_bound):\n        self.lower_bound = lower_bound\n        super().__init__()\n\n    def check(self, value):\n        return (value % 1 == 0) & (value >= self.lower_bound)\n\n    def __repr__(self):\n        fmt_string = self.__class__.__name__[1:]\n        fmt_string += f'(lower_bound={self.lower_bound})'\n        return fmt_string",
    "docstring": "Constrain to an integer interval .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_IntegerGreaterThan Assign FunctionDef name:__init__ arg:self arg:lower_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
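The public constraints built from this class include `constraints.positive_integer` (lower bound 1); `check` is evaluated elementwise over tensors:

```python
import torch
from torch.distributions import constraints

values = torch.tensor([0.5, 1.0, 2.0, -3.0])
# (value % 1 == 0) & (value >= 1), elementwise
print(constraints.positive_integer.check(values))
# tensor([False,  True,  True, False])
```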
  {
    "library": "authlib",
    "name": "verify_rsa_sha1",
    "source_code": "def verify_rsa_sha1(request):\n    from .rsa import verify_sha1\n    base_string = generate_signature_base_string(request)\n    sig = binascii.a2b_base64(to_bytes(request.signature))\n    return verify_sha1(sig, to_bytes(base_string), request.rsa_public_key)",
    "docstring": "Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\signature.py",
    "ast_data": "FunctionDef name:verify_rsa_sha1 arg:request arguments arg Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, estimator, X, y_true, sample_weight=None, **kwargs):\n    if self._deprecation_msg is not None:\n        warnings.warn(self._deprecation_msg, category=DeprecationWarning, stacklevel=2)\n    _raise_for_params(kwargs, self, None)\n    _kwargs = copy.deepcopy(kwargs)\n    if sample_weight is not None:\n        _kwargs['sample_weight'] = sample_weight\n    return self._score(partial(_cached_call, None), estimator, X, y_true, **_kwargs)",
    "docstring": "Evaluate predicted target values for X relative to y_true. Parameters ---------- estimator : object Trained estimator to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : {array-like, sparse matrix} Test data that will be fed to estimator.predict. y_true : array-like Gold standard target values for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. **kwargs : dict Other parameters passed to the scorer. Refer to :func: for more details. Only available if . See the :ref:. .. versionadded:: 1.3 Returns ------- score : float Score function applied to prediction of estimator on X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:estimator arg:X arg:y_true arg:sample_weight arguments arg arg arg arg arg arg If Compare Call Call Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Mishra02",
    "source_code": "class Mishra02(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [1.0 + 1e-09] * self.N))\n        self.global_optimum = [[1.0 for _ in range(self.N)]]\n        self.fglob = 2.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        xn = self.N - sum((x[:-1] + x[1:]) / 2.0)\n        return (1 + xn) ** xn",
    "docstring": "Mishra 2 objective function. This class defines the Mishra 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra02}}({x}) = (1 + x_n)^{x_n} with .. math:: x_n = n - \\sum_{i=1}^{n-1} \\frac{(x_i + x_{i+1})}{2} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra02 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "sitemap_urls_from_robots",
    "source_code": "def sitemap_urls_from_robots(robots_text: str, base_url: str | None=None) -> Iterable[str]:\n    for line in robots_text.splitlines():\n        if line.lstrip().lower().startswith('sitemap:'):\n            url = line.split(':', 1)[1].strip()\n            yield urljoin(base_url or '', url)",
    "docstring": "Return an iterator over all sitemap urls contained in the given robots.txt file",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\sitemap.py",
    "ast_data": "FunctionDef name:sitemap_urls_from_robots arg:robots_text arg:base_url arguments arg arg For Call If Call Call Call Assign Call Call Call BoolOp"
  },
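Relative sitemap entries are resolved against `base_url`; absolute URLs pass through `urljoin` unchanged:

```python
from scrapy.utils.sitemap import sitemap_urls_from_robots

robots = """User-agent: *
Sitemap: https://example.com/sitemap.xml
sitemap: /sitemap-news.xml
"""
print(list(sitemap_urls_from_robots(robots, base_url="https://example.com")))
# ['https://example.com/sitemap.xml', 'https://example.com/sitemap-news.xml']
```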
  {
    "library": "django",
    "name": "escapeseq",
    "source_code": "@register.filter(is_safe=True)\ndef escapeseq(value):\n    return [conditional_escape(obj) for obj in value]",
    "docstring": "An \"escape\" filter for sequences. Mark each element in the sequence, individually, as a string that should be auto-escaped. Return a list with the results.",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:escapeseq arg:value arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_restore_index_levels",
    "source_code": "@final\ndef _maybe_restore_index_levels(self, result: DataFrame) -> None:\n    names_to_restore = []\n    for name, left_key, right_key in zip(self.join_names, self.left_on, self.right_on):\n        if self.orig_left._is_level_reference(left_key) and self.orig_right._is_level_reference(right_key) and (left_key == right_key) and (name not in result.index.names):\n            names_to_restore.append(name)\n    if names_to_restore:\n        result.set_index(names_to_restore, inplace=True)",
    "docstring": "Restore index levels specified as parameters Here we check for cases where and pairs each reference an index level in their respective DataFrames. The joined columns corresponding to these pairs are then restored to the index of . **Note:** This method has side effects. It modifies in-place Parameters ---------- result: DataFrame merge result Returns ------- None",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_maybe_restore_index_levels arg:self arg:result arguments arg arg Assign For Call If BoolOp Call Call Compare Compare Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, event_writer, graph=None, graph_def=None):\n    self.event_writer = event_writer\n    self._session_run_tags = {}\n    if graph is not None or graph_def is not None:\n        self.add_graph(graph=graph, graph_def=graph_def)\n        maybe_graph_as_def = graph.as_graph_def(add_shapes=True) if isinstance(graph, ops.Graph) else graph\n        self.add_meta_graph(meta_graph.create_meta_graph_def(graph_def=graph_def or maybe_graph_as_def))\n    self._seen_summary_tags = set()",
    "docstring": "Creates a and an event file. On construction the summary writer creates a new event file in . This event file will contain protocol buffers constructed when you call one of the following functions: , , , or . If you pass a to the constructor it is added to the event file. (This is equivalent to calling later). TensorBoard will pick the graph from the file and display it graphically so you can interactively explore the graph you built. You will usually pass the graph from the session in which you launched it: Args: event_writer: An EventWriter. Implements add_event and get_logdir. graph: A object, such as . graph_def: DEPRECATED: Use the argument instead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:event_writer arg:graph arg:graph_def arguments arg arg arg arg Assign Assign If BoolOp Compare Compare Call Assign Call Call Call Call BoolOp Assign Call"
  },
  {
    "library": "kornia",
    "name": "apply_transform_keypoint",
    "source_code": "def apply_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n    padding_size = params['padding_size'].to(device=input.device)\n    input = input.pad(padding_size)\n    return super().apply_transform_keypoint(input=input, params=params, flags=flags, transform=transform)",
    "docstring": "Process keypoints corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\crop.py",
    "ast_data": "FunctionDef name:apply_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_slicing_string",
    "source_code": "def validate_slicing_string(slicing_string):\n    return bool(re.search('^\\\\[(\\\\d|,|\\\\s|:)+\\\\]$', slicing_string))",
    "docstring": "Validate a slicing string. Check if the input string contains only brackets, digits, commas and colons that are valid characters in numpy-style array slicing. Args: slicing_string: (str) Input slicing string to be validated. Returns: (bool) True if and only if the slicing string is valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:validate_slicing_string arg:slicing_string arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_legacy_signature",
    "source_code": "@property\ndef is_legacy_signature(self) -> bool:\n    return self._is_legacy_signature",
    "docstring": "If the value is from a legacy signature representation. Legacy signature representations include tf.function.input_signature and ConcreteFunction.structured_input_signature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\trace_type_builder.py",
    "ast_data": "FunctionDef name:is_legacy_signature arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "new_file",
    "source_code": "def new_file(self, *args, **kwargs):\n    super().new_file(*args, **kwargs)\n    self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)",
    "docstring": "Create the file object to append to as data is coming in.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:new_file arg:self arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "as_unordered",
    "source_code": "def as_unordered(self) -> Self:\n    return self.set_ordered(False)",
    "docstring": "Set the Categorical to be unordered. Returns ------- Categorical Unordered Categorical. See Also -------- as_ordered : Set the Categorical to be ordered. Examples -------- For :class:: >>> raw_cat = pd.Categorical([\"a\", \"b\", \"c\", \"a\"], ordered=True) >>> ser = pd.Series(raw_cat) >>> ser.cat.ordered True >>> ser = ser.cat.as_unordered() >>> ser.cat.ordered False For :class:: >>> ci = pd.CategoricalIndex([\"a\", \"b\", \"c\", \"a\"], ordered=True) >>> ci.ordered True >>> ci = ci.as_unordered() >>> ci.ordered False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:as_unordered arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "relabel_aliases",
    "source_code": "def relabel_aliases(self, change_map):\n    if not change_map:\n        return self\n    for pos, child in enumerate(self.children):\n        if hasattr(child, 'relabel_aliases'):\n            child.relabel_aliases(change_map)\n        elif hasattr(child, 'relabeled_clone'):\n            self.children[pos] = child.relabeled_clone(change_map)",
    "docstring": "Relabel the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\where.py",
    "ast_data": "FunctionDef name:relabel_aliases arg:self arg:change_map arguments arg arg If Return return:yes For Call If Call Call If Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "scatter_object",
    "source_code": "def scatter_object(self, object_list: Optional[list[T]]) -> T:\n    if self.use_dist:\n        gather_result = cast(list[T], [None])\n        dist.scatter_object_list(scatter_object_output_list=gather_result, scatter_object_input_list=object_list if self.is_coordinator else None, src=self.global_coordinator_rank, group=self.group)\n        local_reply = gather_result[0]\n    else:\n        assert object_list is not None\n        local_reply = object_list[0]\n    return local_reply",
    "docstring": "Implement functionality similar to c10d::scatter_object but without distributed enabled.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py",
    "ast_data": "FunctionDef name:scatter_object arg:self arg:object_list arguments arg arg If Assign Call Call Assign Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "random_poisson",
    "source_code": "@tf_export(v1=['random.poisson', 'random_poisson'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('random_poisson')\ndef random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):\n    return random_poisson_v2(shape, lam, dtype, seed, name)",
    "docstring": "Draws samples from each of the given Poisson distribution(s). is the rate parameter describing the distribution(s). Example: Args: lam: A Tensor or Python value or N-D array of type . provides the rate parameter(s) describing the poisson distribution(s) to sample. shape: A 1-D integer Tensor or Python array. The shape of the output samples to be drawn per \"rate\"-parameterized distribution. dtype: The type of the output: , , , or . seed: A Python integer. Used to create a random seed for the distributions. See for behavior. name: Optional name for the operation. Returns: samples: a of shape with values of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py",
    "ast_data": "FunctionDef name:random_poisson arg:lam arg:shape arg:dtype arg:seed arg:name arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "ObjectDetectorResult",
    "source_code": "@dataclass(frozen=True)\nclass ObjectDetectorResult:\n    class_id: int\n    confidence: float\n    bbox: BoundingBox",
    "docstring": "Object detection result. Args: class_id: class id of the detected object. confidence: confidence score of the detected object. bbox: bounding box of the detected object in xywh format.",
    "type": "class",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "ClassDef name:ObjectDetectorResult Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self._get_estimator(), method_mapping=MethodMapping().add(caller='fit', callee='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_materialize_cpp_cia_ops",
    "source_code": "def _materialize_cpp_cia_ops() -> None:\n    cia_ops = torch._C._dispatch_get_registrations_for_dispatch_key('CompositeImplicitAutograd')\n    for op in cia_ops:\n        namespace, op_name = tuple(op.split('::'))\n        split_list = op_name.split('.')\n        assert len(split_list) == 1 or len(split_list) == 2\n        op_name = split_list[0]\n        op_overload_name = 'default'\n        if len(split_list) == 2:\n            op_overload_name = split_list[1]\n        _ = getattr(getattr(getattr(torch.ops, namespace), op_name), op_overload_name)",
    "docstring": "Utility function to query C++ dispatcher to get the all possible CIA ops and populate them into torch.ops namespace",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:_materialize_cpp_cia_ops arguments Assign Call For Assign Call Call Assign Call BoolOp Compare Call Compare Call Assign Assign If Compare Call Assign Assign Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "score_samples",
    "source_code": "def score_samples(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, order='C', dtype=np.float64, reset=False)\n    if self.tree_.sample_weight is None:\n        N = self.tree_.data.shape[0]\n    else:\n        N = self.tree_.sum_weight\n    atol_N = self.atol * N\n    log_density = self.tree_.kernel_density(X, h=self.bandwidth_, kernel=self.kernel, atol=atol_N, rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)\n    log_density -= np.log(N)\n    return log_density",
    "docstring": "Compute the log-likelihood of each sample under the model. Parameters ---------- X : array-like of shape (n_samples, n_features) An array of points to query. Last dimension should match dimension of training data (n_features). Returns ------- density : ndarray of shape (n_samples,) Log-likelihood of each sample in . These are normalized to be probability densities, so values will be low for high-dimensional data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_kde.py",
    "ast_data": "FunctionDef name:score_samples arg:self arg:X arguments arg arg Call Assign Call If Compare Assign Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "empty_gpu_cache",
    "source_code": "def empty_gpu_cache(device):\n    if device not in ['cuda', 'xpu', 'mps']:\n        log.warning('Trying to call the empty_gpu_cache for device: %s, which is not in list [cuda, xpu]', device)\n        return\n    getattr(torch, device).empty_cache()",
    "docstring": "Explicitly empty gpu cache to avoid OOM in subsequent run.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:empty_gpu_cache arg:device arguments arg If Compare Call Return return:no Call Call"
  },
  {
    "library": "kornia",
    "name": "check_matrix_shape",
    "source_code": "def check_matrix_shape(matrix: Tensor, matrix_type: str='R') -> None:\n    target_shapes = []\n    if matrix_type == 'R':\n        target_shapes = [[2, 2], [3, 3]]\n    elif matrix_type == 'RT':\n        target_shapes = [[3, 3], [4, 4]]\n    if len(matrix.shape) > 3 or len(matrix.shape) < 2 or list(matrix.shape[-2:]) not in target_shapes:\n        raise ValueError(f'{matrix_type} must be either {target_shapes[0]}x{target_shapes[0]} or               {target_shapes[1]}x{target_shapes[1]}, got {matrix.shape}')",
    "docstring": "Verify matrix shape based on type.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:check_matrix_shape arg:matrix arg:matrix_type arguments arg arg Assign If Compare Assign If Compare Assign If BoolOp Compare Call Compare Call Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "EvalOutput",
    "source_code": "class EvalOutput(_SupervisedOutput):\n\n    def _get_signature_def_fn(self):\n        return signature_def_utils.supervised_eval_signature_def",
    "docstring": "Represents the output of a supervised eval process. This class generates the appropriate signature def for exporting eval output by type-checking and wrapping loss, predictions, and metrics values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "ClassDef name:EvalOutput FunctionDef name:_get_signature_def_fn arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "get_line",
    "source_code": "def get_line(self, lineno: int) -> str:\n    return self.buffers[lineno - 1]",
    "docstring": "Returns specified line.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:get_line arg:self arg:lineno arguments arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "herm2poly",
    "source_code": "def herm2poly(c):\n    from .polynomial import polyadd, polymulx, polysub\n    [c] = pu.as_series([c])\n    n = len(c)\n    if n == 1:\n        return c\n    if n == 2:\n        c[1] *= 2\n        return c\n    else:\n        c0 = c[-2]\n        c1 = c[-1]\n        for i in range(n - 1, 1, -1):\n            tmp = c0\n            c0 = polysub(c[i - 2], c1 * (2 * (i - 1)))\n            c1 = polyadd(tmp, polymulx(c1) * 2)\n        return polyadd(c0, polymulx(c1) * 2)",
    "docstring": "Convert a Hermite series to a polynomial. Convert an array representing the coefficients of a Hermite series, ordered from lowest degree to highest, to an array of the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest to highest degree. Parameters ---------- c : array_like 1-D array containing the Hermite series coefficients, ordered from lowest order term to highest. Returns ------- pol : ndarray 1-D array containing the coefficients of the equivalent polynomial (relative to the \"standard\" basis) ordered from lowest order term to highest. See Also -------- poly2herm Notes ----- The easy way to do conversions between polynomial basis sets is to use the convert method of a class instance. Examples -------- >>> from numpy.polynomial.hermite import herm2poly >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) array([0., 1., 2., 3.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite.py",
    "ast_data": "FunctionDef name:herm2poly arg:c arguments arg Assign Call Assign Call If Compare Return return:yes If Compare Return return:yes Assign Assign For Call Assign Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "config_prefix",
    "source_code": "@contextmanager\ndef config_prefix(prefix: str) -> Generator[None]:\n    global register_option, get_option, set_option\n\n    def wrap(func: F) -> F:\n\n        def inner(key: str, *args, **kwds):\n            pkey = f'{prefix}.{key}'\n            return func(pkey, *args, **kwds)\n        return cast(F, inner)\n    _register_option = register_option\n    _get_option = get_option\n    _set_option = set_option\n    set_option = wrap(set_option)\n    get_option = wrap(get_option)\n    register_option = wrap(register_option)\n    try:\n        yield\n    finally:\n        set_option = _set_option\n        get_option = _get_option\n        register_option = _register_option",
    "docstring": "contextmanager for multiple invocations of API with a common prefix supported API functions: (register / get / set )__option Warning: This is not thread - safe, and won't work properly if you import the API functions into your module using the \"from x import y\" construct. Example ------- import pandas._config.config as cf with cf.config_prefix(\"display.font\"): cf.register_option(\"color\", \"red\") cf.register_option(\"size\", \" 5 pt\") cf.set_option(size, \" 6 pt\") cf.get_option(size) ... etc' will register options \"display.font.color\", \"display.font.size\", set the value of \"display.font.size\"... and so on.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:config_prefix arg:prefix arguments arg FunctionDef name:wrap arg:func arguments arg FunctionDef name:inner arg:key arguments arg arg arg Assign Return return:yes Call Return return:yes Call Assign Assign Assign Assign Call Assign Call Assign Call Try Assign Assign Assign"
  },
  {
    "library": "cherrypy",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    return self",
    "docstring": "Make an app response iterator.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_unshard_params",
    "source_code": "@contextlib.contextmanager\ndef _unshard_params(module: nn.Module, recurse: bool, writeback: bool, rank0_only: bool, offload_to_cpu: bool, with_grads: bool):\n    if not recurse:\n        optional_state = _get_module_fsdp_state(module)\n        if optional_state is None:\n            with contextlib.nullcontext():\n                yield\n            return\n        states_and_modules = ([optional_state], [module])\n    else:\n        states_and_modules = traversal_utils._get_fsdp_states_with_modules(module)\n    with contextlib.ExitStack() as stack:\n        for state, module in zip(*states_and_modules):\n            stack.enter_context(_unshard_params_for_summon(module=module, state=state, writeback=writeback, rank0_only=rank0_only, offload_to_cpu=offload_to_cpu, with_grads=with_grads))\n        yield",
    "docstring": "This unshards FSDP-managed parameters for all modules with FSDP applied in the module tree rooted at ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py",
    "ast_data": "FunctionDef name:_unshard_params arg:module arg:recurse arg:writeback arg:rank0_only arg:offload_to_cpu arg:with_grads arguments arg arg arg arg arg arg If Assign Call If Compare With Call Return return:no Assign Assign Call With Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "check_dependency",
    "source_code": "def check_dependency(partition):\n    visited: set[Partition] = {partition}\n    queue: deque[Partition] = deque([partition])\n    while queue:\n        p = queue.popleft()\n        for child in p.children:\n            if child == partition:\n                return True\n            elif child not in visited:\n                visited.add(child)\n                queue.append(child)\n    return False",
    "docstring": "Given a partition,check if there is a circular dependency on this partition using bfs",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:check_dependency arg:partition arguments arg Call While Assign Call For If Compare Return return:yes If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_c_shim_extern_kernel_call",
    "source_code": "def generate_c_shim_extern_kernel_call(self, kernel: str, args: list[str], device: str, *, debug_args: Optional[list[str]]=None) -> None:\n    self.add_device_include(device)\n    debug_printer_manager = V.graph.wrapper_code.debug_printer\n    debug_printer_manager.set_printer_args(debug_args if debug_args is not None else args, kernel, None, None, 'extern')\n    enable_kernel_profile = config.cpp.enable_kernel_profile and sys.platform in ['linux', 'win32']\n    with debug_printer_manager:\n        shim_fn = self.get_c_shim_func_name(kernel, device)\n        shim_fn_codes = f'AOTI_TORCH_ERROR_CODE_CHECK({shim_fn}({', '.join(args)}));'\n        if enable_kernel_profile:\n            shim_fn_codes = textwrap.dedent(f'\\n                    {{\\n                      RECORD_FUNCTION(\"{shim_fn}\", c10::ArrayRef<c10::IValue>());\\n                      {shim_fn_codes}\\n                    }}\\n                    ')\n        self.writeline(shim_fn_codes)",
    "docstring": "debug_args kwarg allows CppWrapperCpuArrayRef to pass in wrapped arguments in place of args while preserving debug printer output.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_cpu.py",
    "ast_data": "FunctionDef name:generate_c_shim_extern_kernel_call arg:self arg:kernel arg:args arg:device arguments arg arg arg arg arg Call Assign Call Compare Assign BoolOp Compare With Assign Call Assign Call If Assign Call Call"
  },
  {
    "library": "kornia",
    "name": "SOLD2Net",
    "source_code": "class SOLD2Net(Module):\n\n    def __init__(self, model_cfg: Dict[str, Any]) -> None:\n        super().__init__()\n        self.cfg = model_cfg\n        self.backbone_net = HourglassBackbone(**self.cfg['backbone_cfg'])\n        feat_channel = 256\n        self.junction_decoder = SuperpointDecoder(feat_channel, self.cfg['grid_size'])\n        self.heatmap_decoder = PixelShuffleDecoder(feat_channel, num_upsample=2)\n        if 'use_descriptor' in self.cfg:\n            self.descriptor_decoder = SuperpointDescriptor(feat_channel)\n\n    def forward(self, input_images: Tensor) -> Dict[str, Tensor]:\n        features = self.backbone_net(input_images)\n        junctions = self.junction_decoder(features)\n        heatmaps = self.heatmap_decoder(features)\n        outputs = {'junctions': junctions, 'heatmap': heatmaps}\n        if 'use_descriptor' in self.cfg:\n            outputs['descriptors'] = self.descriptor_decoder(features)\n        return outputs",
    "docstring": "Full network for SOLD². Args: model_cfg: the configuration as a Dict. Returns: a Dict with the following values: junctions: heatmap of junctions. heatmap: line heatmap. descriptors: semi-dense descriptors.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\sold2\\backbones.py",
    "ast_data": "ClassDef name:SOLD2Net FunctionDef name:__init__ arg:self arg:model_cfg arguments arg arg Call Call Assign Assign Call Assign Assign Call Assign Call If Compare Assign Call FunctionDef name:forward arg:self arg:input_images arguments arg arg Assign Call Assign Call Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "silent_list",
    "source_code": "class silent_list(list):\n\n    def __init__(self, type, seq=None):\n        self.type = type\n        if seq is not None:\n            self.extend(seq)\n\n    def __repr__(self):\n        if self.type is not None or len(self) != 0:\n            tp = self.type if self.type is not None else type(self[0]).__name__\n            return f'<a list of {len(self)} {tp} objects>'\n        else:\n            return '<an empty list>'",
    "docstring": "A list with a short `` is None, the type name is obtained from the first item in the list (if any).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "ClassDef name:silent_list FunctionDef name:__init__ arg:self arg:type arg:seq arguments arg arg arg Assign If Compare Call FunctionDef name:__repr__ arg:self arguments arg If BoolOp Compare Compare Call Assign Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "split",
    "source_code": "def split(self, input: str) -> list[str]:\n    raise NotImplementedError",
    "docstring": ":param str input: :return: :rtype: list[str]",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\ja.py",
    "ast_data": "FunctionDef name:split arg:self arg:input arguments arg arg Raise"
  },
  {
    "library": "django",
    "name": "copy",
    "source_code": "def copy(self):\n    return copy.copy(self)",
    "docstring": "Return a shallow copy of this object.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "diag",
    "source_code": "def diag(self, X):\n    return np.apply_along_axis(self, 1, X).ravel()",
    "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:diag arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "MultiOutputLine",
    "source_code": "@dataclasses.dataclass\nclass MultiOutputLine(WrapperLine):\n    wrapper: PythonWrapperCodegen\n    result_name: str\n    arg_name: str\n    indices: Sequence[Any]\n\n    def codegen(self, code: IndentedBuffer) -> None:\n\n        def codegen_list_tuple_access(basename, indices):\n            if len(indices) > 0:\n                itype, i = indices[0]\n                if issubclass(itype, list):\n                    return codegen_list_tuple_access(f'{basename}[{i}]', indices[1:])\n                elif issubclass(itype, tuple):\n                    tuple_access = self.wrapper.codegen_tuple_access(basename, self.result_name, str(i))\n                    return codegen_list_tuple_access(tuple_access, indices[1:])\n                elif issubclass(itype, dict):\n                    return codegen_list_tuple_access(f\"{basename}['{i}']\", indices[1:])\n                else:\n                    raise AssertionError('non supported index type: ', itype)\n            else:\n                return basename\n        value = codegen_list_tuple_access(self.arg_name, self.indices)\n        code.writeline(f'{self.wrapper.declare}{self.result_name} = {value}{self.wrapper.ending}')\n\n    def codegen_fx(self, converter: FxConverter) -> FxConversionFunc:\n        return converter._generate_multi_output",
    "docstring": "Given a MultiOutputLayout buffer, indexes actual buffer(s) from the result.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "ClassDef name:MultiOutputLine FunctionDef name:codegen arg:self arg:code arguments arg arg FunctionDef name:codegen_list_tuple_access arg:basename arg:indices arguments arg arg If Compare Call Assign If Call Return return:yes Call If Call Assign Call Call Return return:yes Call If Call Return return:yes Call Raise Call Return return:yes Assign Call Call FunctionDef name:codegen_fx arg:self arg:converter arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_joinstyle",
    "source_code": "def get_joinstyle(self):\n    return self._joinstyle.name",
    "docstring": "Return the joinstyle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_joinstyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "unpad",
    "source_code": "def unpad(self, padding_size: Tensor) -> Boxes:\n    if not (len(padding_size.shape) == 2 and padding_size.size(1) == 4):\n        raise RuntimeError(f'Expected padding_size as (B, 4). Got {padding_size.shape}.')\n    self._data[..., 0] -= padding_size[..., None, :1].to(device=self._data.device)\n    self._data[..., 1] -= padding_size[..., None, 2:3].to(device=self._data.device)\n    return self",
    "docstring": "Pad a bounding box. Args: padding_size: (B, 4)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:unpad arg:self arg:padding_size arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, seq_module):\n    super().__init__()\n    self.seq_module = seq_module",
    "docstring": "Adds padding to the output of the module based on the given lengths. This is to ensure that the results of the model do not change when batch sizes change during inference. Input needs to be in the shape of (BxCxDxT) :param seq_module: The sequential module containing the conv stack.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:seq_module arguments arg arg Call Call Assign"
  },
  {
    "library": "seaborn",
    "name": "default_range",
    "source_code": "@property\ndef default_range(self) -> tuple[float, float]:\n    base = mpl.rcParams['font.size']\n    return (base * 0.5, base * 2)",
    "docstring": "Min and max values used by default for semantic mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:default_range arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "resize_to_be_divisible",
    "source_code": "def resize_to_be_divisible(input: Tensor, divisible_factor: int, interpolation: str='bilinear', align_corners: Optional[bool]=None, side: str='short', antialias: bool=False) -> Tensor:\n    if isinstance(input, Tensor) and len(input.shape) == 4:\n        height, width = (input.shape[2], input.shape[3])\n    if isinstance(input, Tensor) and len(input.shape) == 3:\n        height, width = (input.shape[1], input.shape[2])\n    height = round(height / divisible_factor) * divisible_factor\n    width = round(width / divisible_factor) * divisible_factor\n    return resize(input, (height, width), interpolation, align_corners, side, antialias)",
    "docstring": "Resize the input tensor to be divisible by a certain factor. Args: input (Tensor): Input tensor to be resized. divisible_factor (int): The factor to which the image should be divisible. interpolation (str, optional): Interpolation flag. Defaults to \"bilinear\". align_corners (Optional[bool], optional): whether to align the corners of the input and output. Defaults to None. side (str, optional): Side to resize. Defaults to \"short\". antialias (bool, optional): If True, then image will be filtered with Gaussian before downscaling. Defaults to False. Returns: Tensor: The resized tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:resize_to_be_divisible arg:input arg:divisible_factor arg:interpolation arg:align_corners arg:side arg:antialias arguments arg arg arg arg arg arg If BoolOp Call Compare Call Assign If BoolOp Call Compare Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_elements",
    "source_code": "def num_elements(self):\n    if self.is_fully_defined():\n        return functools.reduce(operator.mul, self.as_list(), 1)\n    else:\n        return None",
    "docstring": "Returns the total number of elements, or none for incomplete shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:num_elements arg:self arguments arg If Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "scrapy",
    "name": "_path_safe",
    "source_code": "def _path_safe(text: str) -> str:\n    pathable_slot = ''.join([c if c.isalnum() or c in '-._' else '_' for c in text])\n    unique_slot = hashlib.md5(text.encode('utf8')).hexdigest()\n    return f'{pathable_slot}-{unique_slot}'",
    "docstring": "Return a filesystem-safe version of a string `` >>> _path_safe('simple.org').startswith('simple.org') True >>> _path_safe('dash-underscore_.org').startswith('dash-underscore_.org') True >>> _path_safe('some@symbol?').startswith('some_symbol_') True",
    "type": "function",
    "file_path": "scrapy\\scrapy\\pqueues.py",
    "ast_data": "FunctionDef name:_path_safe arg:text arguments arg Assign Call BoolOp Call Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "clump_masked",
    "source_code": "def clump_masked(a):\n    mask = ma.getmask(a)\n    if mask is nomask:\n        return []\n    return _ezclump(mask)",
    "docstring": "Returns a list of slices corresponding to the masked clumps of a 1-D array. (A \"clump\" is defined as a contiguous region of the array). Parameters ---------- a : ndarray A one-dimensional masked array. Returns ------- slices : list of slice The list of slices, one for each continuous region of masked elements in . See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges notmasked_contiguous, clump_unmasked Examples -------- >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_masked(a) [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:clump_masked arg:a arguments arg Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_make_str_method",
    "source_code": "def _make_str_method(*args, **kwargs):\n    indent = functools.partial(textwrap.indent, prefix=' ' * 4)\n\n    def strrepr(x):\n        return repr(x) if isinstance(x, str) else str(x)\n    return lambda self: type(self).__name__ + '(' + ','.join([*(indent('\\n' + strrepr(getattr(self, arg))) for arg in args), *(indent('\\n' + k + '=' + strrepr(getattr(self, arg))) for k, arg in kwargs.items())]) + ')'",
    "docstring": "Generate a `.Transform` will be .. code-block:: text {type(T).__name__}( {self.attr}, key={self.other})",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:_make_str_method arguments arg arg Assign Call FunctionDef name:strrepr arg:x arguments arg Return return:yes Call Call Call Return return:yes arguments arg Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "greater_equal",
    "source_code": "def greater_equal(a, b):\n    return _maybe_static(a) >= _maybe_static(b)",
    "docstring": "A version of tf.greater_equal that eagerly evaluates if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py",
    "ast_data": "FunctionDef name:greater_equal arg:a arg:b arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox_edge_pos",
    "source_code": "@staticmethod\ndef get_bbox_edge_pos(bbox, loc):\n    x0, y0, x1, y1 = bbox.extents\n    if loc == 1:\n        return (x1, y1)\n    elif loc == 2:\n        return (x0, y1)\n    elif loc == 3:\n        return (x0, y0)\n    elif loc == 4:\n        return (x1, y0)",
    "docstring": "Return the `.BboxConnector` constructor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py",
    "ast_data": "FunctionDef name:get_bbox_edge_pos arg:bbox arg:loc arguments arg arg Assign If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "StataNonCatValueLabel",
    "source_code": "class StataNonCatValueLabel(StataValueLabel):\n\n    def __init__(self, labname: str, value_labels: dict[float, str], encoding: Literal['latin-1', 'utf-8']='latin-1') -> None:\n        if encoding not in ('latin-1', 'utf-8'):\n            raise ValueError('Only latin-1 and utf-8 are supported.')\n        self.labname = labname\n        self._encoding = encoding\n        self.value_labels = sorted(value_labels.items(), key=lambda x: x[0])\n        self._prepare_value_labels()",
    "docstring": "Prepare formatted version of value labels Parameters ---------- labname : str Value label name value_labels: Dictionary Mapping of values to labels encoding : {\"latin-1\", \"utf-8\"} Encoding to use for value labels.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "ClassDef name:StataNonCatValueLabel FunctionDef name:__init__ arg:self arg:labname arg:value_labels arg:encoding arguments arg arg arg arg If Compare Raise Call Assign Assign Assign Call Call arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "shift",
    "source_code": "def shift(self, periods: int=1, freq=None) -> Self:\n    raise NotImplementedError",
    "docstring": "Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or string, optional Frequency increment to shift by. If None, the index is shifted by its own attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.DatetimeIndex Shifted index. See Also -------- Index.shift : Shift values of Index. PeriodIndex.shift : Shift values of PeriodIndex.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py",
    "ast_data": "FunctionDef name:shift arg:self arg:periods arg:freq arguments arg arg arg Raise"
  },
  {
    "library": "pandas",
    "name": "set_function_name",
    "source_code": "def set_function_name(f: F, name: str, cls: type) -> F:\n    f.__name__ = name\n    f.__qualname__ = f'{cls.__name__}.{name}'\n    f.__module__ = cls.__module__\n    return f",
    "docstring": "Bind the name/qualname attributes of the function.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:set_function_name arg:f arg:name arg:cls arguments arg arg arg Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "log_cosh",
    "source_code": "@dispatch.add_dispatch_support\ndef log_cosh(y_true, y_pred):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n\n    def _logcosh(x):\n        return x + math_ops.softplus(-2.0 * x) - math_ops.cast(math_ops.log(2.0), x.dtype)\n    return backend.mean(_logcosh(y_pred - y_true), axis=-1)",
    "docstring": "Logarithm of the hyperbolic cosine of the prediction error. is approximately equal to for small and to for large . This means that 'logcosh' works mostly like the mean squared error, but will not be so strongly affected by the occasional wildly incorrect prediction. Standalone usage: >>> y_true = np.random.random(size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.logcosh(y_true, y_pred) >>> assert loss.shape == (2,) >>> x = y_pred - y_true >>> assert np.allclose( ... loss.numpy(), ... np.mean(x + np.log(np.exp(-2. * x) + 1.) - math_ops.log(2.), axis=-1), ... atol=1e-5) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Logcosh error values. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:log_cosh arg:y_true arg:y_pred arguments arg arg Assign Call Assign Call FunctionDef name:_logcosh arg:x arguments arg Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ViewsPositionsBase",
    "source_code": "class ViewsPositionsBase(ToolBase):\n    _on_trigger = None\n\n    def trigger(self, sender, event, data=None):\n        self.toolmanager.get_tool(_views_positions).add_figure(self.figure)\n        getattr(self.toolmanager.get_tool(_views_positions), self._on_trigger)()\n        self.toolmanager.get_tool(_views_positions).update_view()",
    "docstring": "Base class for , and .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ViewsPositionsBase Assign FunctionDef name:trigger arg:self arg:sender arg:event arg:data arguments arg arg arg arg Call Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "g",
    "source_code": "class g(sympy.Function):\n    nargs = 3\n\n    @classmethod\n    def eval(cls, n, rho, v):\n        if not n >= 0:\n            raise ValueError('must have n >= 0')\n        elif n == 0:\n            return 1\n        else:\n            return g(n - 1, rho, v) + gammasimp(gamma(rho + 2 + n) / gamma(rho + 2)) / gammasimp(gamma(3 + n) / gamma(3)) * v ** n",
    "docstring": "Helper function g according to Wright (1935) g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...) Note: Wright (1935) uses square root of above definition.",
    "type": "class",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "ClassDef name:g Assign FunctionDef name:eval arg:cls arg:n arg:rho arg:v arguments arg arg arg arg If Compare Raise Call If Compare Return return:yes Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "decode",
    "source_code": "def decode(encoding=None, default_encoding='utf-8'):\n    body = cherrypy.request.body\n    if encoding is not None:\n        if not isinstance(encoding, list):\n            encoding = [encoding]\n        body.attempt_charsets = encoding\n    elif default_encoding:\n        if not isinstance(default_encoding, list):\n            default_encoding = [default_encoding]\n        body.attempt_charsets = body.attempt_charsets + default_encoding",
    "docstring": "Replace or extend the list of charsets used to decode a request entity. Either argument may be a single string or a list of strings. encoding If not None, restricts the set of charsets attempted while decoding a request entity to the given set (even if a different charset is given in the Content-Type request header). default_encoding Only in effect if the 'encoding' argument is not given. If given, the set of charsets attempted while decoding a request entity is *extended* with the given value(s).",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\encoding.py",
    "ast_data": "FunctionDef name:decode arg:encoding arg:default_encoding arguments arg arg Assign If Compare If Call Assign Assign If If Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "categorical_accuracy",
    "source_code": "@dispatch.add_dispatch_support\ndef categorical_accuracy(y_true, y_pred):\n    return math_ops.cast(math_ops.equal(math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)), backend.floatx())",
    "docstring": "Calculates how often predictions match one-hot labels. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as , since argmax of logits and probabilities are same. Args: y_true: One-hot ground truth values. y_pred: The prediction values. Returns: Categorical accuracy values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:categorical_accuracy arg:y_true arg:y_pred arguments arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "create_nojekyll_and_cname",
    "source_code": "def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) -> None:\n    if app.builder.format != 'html':\n        return\n    app.builder.outdir.joinpath('.nojekyll').touch()\n    cname_path = app.builder.outdir / 'CNAME'\n    domain = _get_domain_from_url(app.config.html_baseurl)\n    if domain and (not domain.endswith('.github.io')):\n        with open(cname_path, 'w', encoding='utf-8') as f:\n            f.write(domain)\n    else:\n        cname_path.unlink(missing_ok=True)",
    "docstring": "Manage the `html_baseurl` files from the output directory.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\githubpages.py",
    "ast_data": "FunctionDef name:create_nojekyll_and_cname arg:app arg:env arguments arg arg If Compare Return return:no Call Call Assign Assign Call If BoolOp Call With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "L2_cache_size",
    "source_code": "@cached_property\ndef L2_cache_size(self: Self) -> int:\n    device = torch.cuda.current_device()\n    props = torch.cuda.get_device_properties(device)\n    return props.L2_cache_size",
    "docstring": "Get the L2 cache size, in bytes, of the current device.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\benchmarking.py",
    "ast_data": "FunctionDef name:L2_cache_size arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Flatten",
    "source_code": "class Flatten(Module):\n    __constants__ = ['start_dim', 'end_dim']\n    start_dim: int\n    end_dim: int\n\n    def __init__(self, start_dim: int=1, end_dim: int=-1) -> None:\n        super().__init__()\n        self.start_dim = start_dim\n        self.end_dim = end_dim\n\n    def forward(self, input: Tensor) -> Tensor:\n        return input.flatten(self.start_dim, self.end_dim)\n\n    def extra_repr(self) -> str:\n        return f'start_dim={self.start_dim}, end_dim={self.end_dim}'",
    "docstring": "Flattens a contiguous range of dims into a tensor. For use with :class:, see :meth: for details. Shape: - Input: :math:,' where :math: is the size at dimension :math: and :math: means any number of dimensions including none. - Output: :math:. Args: start_dim: first dim to flatten (default = 1). end_dim: last dim to flatten (default = -1). Examples:: >>> input = torch.randn(32, 1, 5, 5) >>> # With default parameters >>> m = nn.Flatten() >>> output = m(input) >>> output.size() torch.Size([32, 25]) >>> # With non-default parameters >>> m = nn.Flatten(0, 2) >>> output = m(input) >>> output.size() torch.Size([160, 5])",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\flatten.py",
    "ast_data": "ClassDef name:Flatten Assign FunctionDef name:__init__ arg:self arg:start_dim arg:end_dim arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_parse_enc",
    "source_code": "def _parse_enc(path):\n    no_comments = re.sub('%.*', '', Path(path).read_text(encoding='ascii'))\n    array = re.search('(?s)\\\\[(.*)\\\\]', no_comments).group(1)\n    lines = [line for line in array.split() if line]\n    if all((line.startswith('/') for line in lines)):\n        return [line[1:] for line in lines]\n    else:\n        raise ValueError(f'Failed to parse {path} as Postscript encoding')",
    "docstring": "Parse a \\*.enc file referenced from a psfonts.map style file. The format supported by this function is a tiny subset of PostScript. Parameters ---------- path : Returns ------- list The nth list item is the PostScript glyph name of the nth glyph.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:_parse_enc arg:path arguments arg Assign Call Call Call Assign Call Call Assign Call If Call Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_unshard_shape",
    "source_code": "def _unshard_shape(self, shape):\n    shape = tensor_shape.as_shape(shape)\n    if self._number_of_shards == 1:\n        return shape\n    ndims = shape.ndims\n    if ndims is None:\n        raise ValueError(f'Shape {shape} must be statically known.')\n    if ndims <= self._shard_dimension:\n        raise ValueError(f'Shape {shape.as_list()} does not contain shard_dimension {self._shard_dimension}. Rank is too small.')\n    dims = shape.as_list()\n    dims[self._shard_dimension] *= self._number_of_shards\n    return tensor_shape.TensorShape(dims)",
    "docstring": "Return the unsharded shape that would generate a given sharded shape. Args: shape: the sharded shape to unshard Returns: The unsharded shape. Raises: ValueError: if shape is unknown or does not contain self.shard_dimension TypeError: if shape is not convertible to a TensorShape",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py",
    "ast_data": "FunctionDef name:_unshard_shape arg:self arg:shape arguments arg arg Assign Call If Compare Return return:yes Assign If Compare Raise Call If Compare Raise Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    if self.weights == 'uniform':\n        neigh_ind = self.kneighbors(X, return_distance=False)\n        neigh_dist = None\n    else:\n        neigh_dist, neigh_ind = self.kneighbors(X)\n    weights = _get_weights(neigh_dist, self.weights)\n    _y = self._y\n    if _y.ndim == 1:\n        _y = _y.reshape((-1, 1))\n    if weights is None:\n        y_pred = np.mean(_y[neigh_ind], axis=1)\n    else:\n        y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)\n        denom = np.sum(weights, axis=1)\n        for j in range(_y.shape[1]):\n            num = np.sum(_y[neigh_ind, j] * weights, axis=1)\n            y_pred[:, j] = num / denom\n    if self._y.ndim == 1:\n        y_pred = y_pred.ravel()\n    return y_pred",
    "docstring": "Predict the target for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If , predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int Target values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_regression.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg If Compare Assign Call Assign Assign Call Assign Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call For Call Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "no_manual_dependency_tracking_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef no_manual_dependency_tracking_scope(obj):\n    previous_value = getattr(obj, '_manual_tracking', True)\n    obj._manual_tracking = False\n    try:\n        yield\n    finally:\n        obj._manual_tracking = previous_value",
    "docstring": "A context that disables manual dependency tracking for the given . Sometimes library methods might track objects on their own and we might want to disable that and do the tracking on our own. One can then use this context manager to disable the tracking the library method does and do your own tracking. For example: class TestLayer(tf.keras.Layer): def build(): with no_manual_dependency_tracking_scope(self): var = self.add_variable(\"name1\") # Creates a var and doesn't track it self._track_trackable(\"name2\", var) # We track variable with name Args: obj: A trackable object. Yields: a scope in which the object doesn't track dependencies manually.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:no_manual_dependency_tracking_scope arg:obj arguments arg Assign Call Assign Try Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_or_create_eval_step",
    "source_code": "def _get_or_create_eval_step():\n    graph = ops.get_default_graph()\n    eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)\n    if len(eval_steps) == 1:\n        return eval_steps[0]\n    elif len(eval_steps) > 1:\n        raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')\n    else:\n        counter = variable_scope.get_variable('eval_step', shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer(), trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])\n        return counter",
    "docstring": "Gets or creates the eval step . Returns: A representing a counter for the evaluation step. Raises: ValueError: If multiple have been added to the collection.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\evaluation.py",
    "ast_data": "FunctionDef name:_get_or_create_eval_step arguments Assign Call Assign Call If Compare Call Return return:yes If Compare Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "IndexName",
    "source_code": "class IndexName(TableColumns):\n\n    def __init__(self, table, columns, suffix, create_index_name):\n        self.suffix = suffix\n        self.create_index_name = create_index_name\n        super().__init__(table, columns)\n\n    def __str__(self):\n        return self.create_index_name(self.table, self.columns, self.suffix)",
    "docstring": "Hold a reference to an index name.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "ClassDef name:IndexName FunctionDef name:__init__ arg:self arg:table arg:columns arg:suffix arg:create_index_name arguments arg arg arg arg arg Assign Assign Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_iterator_element_layouts",
    "source_code": "def set_iterator_element_layouts(self, iterator_resource_dtensor, layouts: List[layout_lib.Layout]):\n    _pywrap_dtensor_device.SetIteratorElementLayouts(context.context()._handle, iterator_resource_dtensor, [layout.to_string() for layout in layouts], self._device_info)",
    "docstring": "Sets the element layouts on an iterator resource tensor. Args: iterator_resource_dtensor: a DTensor created by packing the individiual iterator resource tensors. layouts: the flattened list of layouts to be applied to the elements emitted by the iterator resource DTensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:set_iterator_element_layouts arg:self arg:iterator_resource_dtensor arg:layouts arguments arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "from_proto",
    "source_code": "@classmethod\ndef from_proto(cls, proto: Any) -> 'FunctionType':\n    return FunctionType([Parameter.from_proto(p) for p in proto.parameters], collections.OrderedDict([(c.name, serialization.deserialize(c.type_constraint)) for c in proto.captures]))",
    "docstring": "Generate a FunctionType from the proto representation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:from_proto arg:cls arg:proto arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "isnumeric",
    "source_code": "def isnumeric(self):\n    return isnumeric(self)",
    "docstring": "For each element in , return True if there are only numeric characters in the element. See Also -------- char.isnumeric",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isnumeric arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "event_shape_tensor",
    "source_code": "def event_shape_tensor(self, name='event_shape_tensor'):\n    with self._name_scope(name):\n        if self.event_shape.is_fully_defined():\n            return ops.convert_to_tensor(self.event_shape.as_list(), dtype=dtypes.int32, name='event_shape')\n        return self._event_shape_tensor()",
    "docstring": "Shape of a single sample from a single batch as a 1-D int32 . Args: name: name to give to the op Returns: event_shape: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:event_shape_tensor arg:self arg:name arguments arg arg With Call If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "Timeouts",
    "source_code": "class Timeouts:\n    occupied = 5\n    free = 1",
    "docstring": "Timeout constants.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "ClassDef name:Timeouts Assign Assign"
  },
  {
    "library": "django",
    "name": "prefetch_related",
    "source_code": "def prefetch_related(self, *lookups):\n    clone = self._clone()\n    if lookups == (None,):\n        clone._prefetch_related_lookups = ()\n    else:\n        clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups\n    return clone",
    "docstring": "Same as QuerySet.prefetch_related()",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:prefetch_related arg:self arguments arg arg Assign Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "count_resource_variables",
    "source_code": "def count_resource_variables(model):\n    if not isinstance(model, schema_fb.ModelT):\n        model = convert_bytearray_to_object(model)\n    unique_shared_names = set()\n    for subgraph in model.subgraphs:\n        if subgraph.operators is None:\n            continue\n        for op in subgraph.operators:\n            builtin_code = schema_util.get_builtin_code_from_operator_code(model.operatorCodes[op.opcodeIndex])\n            if builtin_code == schema_fb.BuiltinOperator.VAR_HANDLE:\n                unique_shared_names.add(op.builtinOptions.sharedName)\n    return len(unique_shared_names)",
    "docstring": "Calculates the number of unique resource variables in a model. Args: model: the input tflite model, either as bytearray or object. Returns: An integer number representing the number of unique resource variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:count_resource_variables arg:model arguments arg If Call Assign Call Assign Call For If Compare For Assign Call If Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "associate_name_with_obj",
    "source_code": "def associate_name_with_obj(self, name: str, obj: Any):\n    maybe_existing = self._obj_to_name.setdefault(obj, name)\n    assert maybe_existing is name, 'obj is already associated'",
    "docstring": "Associate a unique name with an object. Neither nor should be associated already.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:associate_name_with_obj arg:self arg:name arg:obj arguments arg arg arg Assign Call Compare"
  },
  {
    "library": "pytorch",
    "name": "match_block_pointer_subexpr",
    "source_code": "def match_block_pointer_subexpr(expr: sympy.Expr, range_tree: IterationRangesRoot) -> Optional[BlockParameters]:\n    for match_func in (match_affine_block, match_mod_div_block):\n        match = match_func(expr, range_tree)\n        if match is not None:\n            return match\n    return None",
    "docstring": "Match a block indexing subexpression involving a single range tree.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:match_block_pointer_subexpr arg:expr arg:range_tree arguments arg arg For Assign Call If Compare Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "hfft2",
    "source_code": "def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    return hfftn(x, s, axes, norm, overwrite_x, workers)",
    "docstring": "2-D discrete Fourier transform of a Hermitian sequence",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:hfft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, kl_fn):\n    if not callable(kl_fn):\n        raise TypeError('kl_fn must be callable, received: %s' % kl_fn)\n    if self._key in _DIVERGENCES:\n        raise ValueError('KL(%s || %s) has already been registered to: %s' % (self._key[0].__name__, self._key[1].__name__, _DIVERGENCES[self._key]))\n    _DIVERGENCES[self._key] = kl_fn\n    return kl_fn",
    "docstring": "Perform the KL registration. Args: kl_fn: The function to use for the KL divergence. Returns: kl_fn Raises: TypeError: if kl_fn is not a callable. ValueError: if a KL divergence function has already been registered for the given argument classes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:kl_fn arguments arg arg If Call Raise Call If Compare Raise Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_equal_aspect_axis_indices",
    "source_code": "def _equal_aspect_axis_indices(self, aspect):\n    ax_indices = []\n    if aspect == 'equal':\n        ax_indices = [0, 1, 2]\n    elif aspect == 'equalxy':\n        ax_indices = [0, 1]\n    elif aspect == 'equalxz':\n        ax_indices = [0, 2]\n    elif aspect == 'equalyz':\n        ax_indices = [1, 2]\n    return ax_indices",
    "docstring": "Get the indices for which of the x, y, z axes are constrained to have equal aspect ratios. Parameters ---------- aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'} See descriptions in docstring for .",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_equal_aspect_axis_indices arg:self arg:aspect arguments arg arg Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "draw_rubberband",
    "source_code": "def draw_rubberband(self, *data):\n    raise NotImplementedError",
    "docstring": "Draw rubberband. This method must get implemented per backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:draw_rubberband arg:self arguments arg arg Raise"
  },
  {
    "library": "matplotlib",
    "name": "draw_rubberband",
    "source_code": "def draw_rubberband(self, event, x0, y0, x1, y1):\n    pass",
    "docstring": "Draw a rectangle rubberband to indicate zoom limits. Note that it is not guaranteed that ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:draw_rubberband arg:self arg:event arg:x0 arg:y0 arg:x1 arg:y1 arguments arg arg arg arg arg arg"
  },
  {
    "library": "authlib",
    "name": "create_save_token_func",
    "source_code": "def create_save_token_func(session, token_model):\n\n    def save_token(token, request):\n        if request.user:\n            user_id = request.user.get_user_id()\n        else:\n            user_id = None\n        client = request.client\n        item = token_model(client_id=client.client_id, user_id=user_id, **token)\n        session.add(item)\n        session.commit()\n    return save_token",
    "docstring": "Create an `` function that can be used in authorization server. :param session: SQLAlchemy session :param token_model: Token model class",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py",
    "ast_data": "FunctionDef name:create_save_token_func arg:session arg:token_model arguments arg arg FunctionDef name:save_token arg:token arg:request arguments arg arg If Assign Call Assign Assign Assign Call Call Call Return return:yes"
  },
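A sketch of wiring the returned ``save_token`` into an Authlib authorization server; ``session``, ``OAuth2Token``, ``app``, and ``query_client`` are illustrative stand-ins for a real SQLAlchemy session, token model, Flask app, and client loader.

```python
from authlib.integrations.flask_oauth2 import AuthorizationServer
from authlib.integrations.sqla_oauth2 import create_save_token_func

# session / OAuth2Token / app / query_client are assumed to exist elsewhere.
save_token = create_save_token_func(session, OAuth2Token)
server = AuthorizationServer(app, query_client=query_client, save_token=save_token)
```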
  {
    "library": "pytorch",
    "name": "CircularPad3d",
    "source_code": "class CircularPad3d(_CircularPadNd):\n    padding: tuple[int, int, int, int, int, int]\n\n    def __init__(self, padding: _size_6_t) -> None:\n        super().__init__()\n        self.padding = _ntuple(6)(padding)\n\n    def _check_input_dim(self, input):\n        if input.dim() != 4 and input.dim() != 5:\n            raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')",
    "docstring": "Pads the input tensor using circular padding of the input boundary. Tensor values at the beginning of the dimension are used to pad the end, and values at the end are used to pad the beginning. If negative padding is applied then the ends of the tensor get removed. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math: :math: :math: Examples:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = nn.CircularPad3d(3) >>> input = torch.randn(16, 3, 8, 320, 480) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1)) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\padding.py",
    "ast_data": "ClassDef name:CircularPad3d FunctionDef name:__init__ arg:self arg:padding arguments arg arg Call Call Assign Call Call FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If BoolOp Compare Call Compare Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "get_deferred_fields",
    "source_code": "def get_deferred_fields(self):\n    return {f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__}",
    "docstring": "Return a set containing names of deferred fields for this instance.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:get_deferred_fields arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "scrapy",
    "name": "deferred_f_from_coro_f",
    "source_code": "def deferred_f_from_coro_f(coro_f: Callable[_P, Awaitable[_T]]) -> Callable[_P, Deferred[_T]]:\n\n    @wraps(coro_f)\n    def f(*coro_args: _P.args, **coro_kwargs: _P.kwargs) -> Deferred[_T]:\n        return deferred_from_coro(coro_f(*coro_args, **coro_kwargs))\n    return f",
    "docstring": "Converts a coroutine function into a function that returns a Deferred. The coroutine function will be called at the time when the wrapper is called. Wrapper args will be passed to it. This is useful for callback chains, as callback functions are called with the previous callback result.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:deferred_f_from_coro_f arg:coro_f arguments arg FunctionDef name:f arguments arg arg Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prod",
    "source_code": "def prod(xs: Sequence[NumberType]) -> NumberType:\n    return reduce(operator.mul, xs, 1)",
    "docstring": "Product of elements in input sequence. Returns 1 for empty sequence",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:prod arg:xs arguments arg Return return:yes Call"
  },
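The initializer ``1`` in the ``reduce`` call is what gives the empty-sequence result; a quick stdlib-only check:

```python
from functools import reduce
import operator

def prod(xs):
    # The third argument to reduce is the initial accumulator, so an
    # empty sequence short-circuits to 1 rather than raising TypeError.
    return reduce(operator.mul, xs, 1)

assert prod([2, 3, 4]) == 24
assert prod([]) == 1
```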
  {
    "library": "cryptography",
    "name": "rfc4514_attribute_name",
    "source_code": "@property\ndef rfc4514_attribute_name(self) -> str:\n    return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)",
    "docstring": "The short attribute name (for example \"CN\") if available, otherwise the OID dotted string.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\x509\\name.py",
    "ast_data": "FunctionDef name:rfc4514_attribute_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "placeholder_value",
    "source_code": "@abc.abstractmethod\ndef placeholder_value(self, placeholder_context) -> Any:\n    pass",
    "docstring": "Creates a placeholder for tracing. tf.funcion traces with the placeholder value rather than the actual value. For example, a placeholder value can represent multiple different actual values. This means that the trace generated with that placeholder value is more general and reusable which saves expensive retracing. Args: placeholder_context: A context reserved for internal/future usage. For the example shared above, implementing: instructs tf.function to trace with the objects instead of the actual and objects when it receives a call to . For example, Tensor arguments are replaced with Tensors of similar shape and dtype, output from a tf.Placeholder op. More generally, placeholder values are the arguments of a tf.function, as seen from the function's body:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\trace.py",
    "ast_data": "FunctionDef name:placeholder_value arg:self arg:placeholder_context arguments arg arg"
  },
  {
    "library": "matplotlib",
    "name": "offset_copy",
    "source_code": "def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):\n    _api.check_in_list(['dots', 'points', 'inches'], units=units)\n    if units == 'dots':\n        return trans + Affine2D().translate(x, y)\n    if fig is None:\n        raise ValueError('For units of inches or points a fig kwarg is needed')\n    if units == 'points':\n        x /= 72.0\n        y /= 72.0\n    return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)",
    "docstring": "Return a new transform with an added offset. Parameters ---------- trans : subclass Any transform, to which offset will be applied. fig : , default: None Current figure. It can be None if *units* are 'dots'. x, y : float, default: 0.0 The offset to apply. units : {'inches', 'points', 'dots'}, default: 'inches' Units of the offset. Returns ------- subclass Transform with applied offset.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:offset_copy arg:trans arg:fig arg:x arg:y arg:units arguments arg arg arg arg arg Call If Compare Return return:yes Call Call If Compare Raise Call If Compare Return return:yes Call"
  },
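A short usage sketch: labels placed a fixed number of points away from data points, which exercises the 'points' branch (division by 72) in the source above.

```python
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy

fig, ax = plt.subplots()
pts = [(0, 0), (1, 1)]
ax.plot(*zip(*pts), "o")
# 'points' converts x/y to inches (divide by 72) before building the
# ScaledTranslation against fig.dpi_scale_trans.
trans = offset_copy(ax.transData, fig=fig, x=5, y=5, units="points")
for x, y in pts:
    ax.text(x, y, f"({x}, {y})", transform=trans)
```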
  {
    "library": "pytorch",
    "name": "initial_seed",
    "source_code": "def initial_seed() -> int:\n    _lazy_init()\n    idx = current_device()\n    default_generator = torch.xpu.default_generators[idx]\n    return default_generator.initial_seed()",
    "docstring": "Return the current random seed of the current GPU. .. warning:: This function eagerly initializes XPU.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\random.py",
    "ast_data": "FunctionDef name:initial_seed arguments Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_query_iterator",
    "source_code": "@staticmethod\ndef _query_iterator(result, exit_stack: ExitStack, chunksize: int, columns, index_col=None, coerce_float: bool=True, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> Generator[DataFrame]:\n    has_read_data = False\n    with exit_stack:\n        while True:\n            data = result.fetchmany(chunksize)\n            if not data:\n                if not has_read_data:\n                    yield _wrap_result([], columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend)\n                break\n            has_read_data = True\n            yield _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend)",
    "docstring": "Return generator through chunked result set",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_query_iterator arg:result arg:exit_stack arg:chunksize arg:columns arg:index_col arg:coerce_float arg:parse_dates arg:dtype arg:dtype_backend arguments arg arg arg arg arg arg arg arg arg Assign With While Assign Call If If Call Assign Call"
  },
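This private generator appears to be what backs the public ``chunksize`` argument; a runnable sketch through ``pd.read_sql``:

```python
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite://")  # in-memory database
pd.DataFrame({"a": range(10)}).to_sql("t", engine, index=False)

# chunksize makes read_sql return the generator built by _query_iterator,
# yielding DataFrames of at most 4 rows until fetchmany() is exhausted.
for chunk in pd.read_sql("SELECT a FROM t", engine, chunksize=4):
    print(len(chunk))  # 4, 4, 2
```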
  {
    "library": "pytorch",
    "name": "_verify_nn_module_stack",
    "source_code": "def _verify_nn_module_stack(graph_module: torch.fx.GraphModule) -> None:\n    for i, mod in enumerate([graph_module] + list(graph_module.modules())):\n        if not isinstance(mod, torch.fx.GraphModule):\n            continue\n        for node in mod.graph.nodes:\n            if node.op in ['call_function', 'get_attr']:\n                if i == 0:\n                    if (nn_module_stack := node.meta.get('nn_module_stack', None)) is None:\n                        raise SpecViolationError(f'Node {node} of type {node.op} is missing nn_module_stack metadata')\n                    if not all((isinstance(k, str) and isinstance(v, tuple) and (len(v) == 2) and all((isinstance(x, str) for x in v)) for k, v in nn_module_stack.items())):\n                        raise SpecViolationError(f'Node {node} of type {node.op} has incorrect nn_module_stack metadata formatexpected Dict[str, Tuple[str, str]], but got {nn_module_stack}')\n            elif node.op in ['placeholder', 'output']:\n                if node.meta.get('nn_module_stack', None):\n                    raise SpecViolationError(f'Node {node} of type {node.op} contains nn_module_stack metadata, this should be None')",
    "docstring": "Perform nn_module_stack checks on the graph. Current constraints: For the top level graph: - populated for 'call_function', 'get_attr' - None for 'placeholder', 'output' For submodule graphs: - None for 'placeholder', output' TODO(pianpwk): make this a consistent node-level check once nn_module_stack is populated for cond submodules.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_verify_nn_module_stack arg:graph_module arguments arg For Call Call Call If Call For If Compare If Compare If Compare Call Raise Call If Call BoolOp Call Call Compare Call Call Call Call Raise Call If Compare If Call Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "readlines",
    "source_code": "def readlines(self, sizehint=None):\n    if self.length is not None:\n        if sizehint is None:\n            sizehint = self.length - self.bytes_read\n        else:\n            sizehint = min(sizehint, self.length - self.bytes_read)\n    lines = []\n    seen = 0\n    while True:\n        line = self.readline()\n        if not line:\n            break\n        lines.append(line)\n        seen += len(line)\n        if seen >= sizehint:\n            break\n    return lines",
    "docstring": "Read lines from the request body and return them.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:readlines arg:self arg:sizehint arguments arg arg If Compare If Compare Assign Assign Call Assign Assign While Assign Call If Call Call If Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "statically_known_equals",
    "source_code": "def statically_known_equals(self, left: Union[Expr, int], right: Union[Expr, int]) -> bool:\n    return self.is_expr_static_and_true(sympy.Eq(left, right))",
    "docstring": "Returns a bool indicating if it is sound to optimize as if left and right are equal.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:statically_known_equals arg:self arg:left arg:right arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_explain_graph_detail",
    "source_code": "def _explain_graph_detail(gm: torch.fx.GraphModule, graphs, op_count, ops_per_graph, break_reasons):\n    graphs.append(gm)\n    ops = [node.target for node in gm.graph.nodes if node.op == 'call_function']\n    op_count += len(ops)\n    ops_per_graph.append(ops)\n    if gm.compile_subgraph_reason.graph_break:\n        break_reasons.append(gm.compile_subgraph_reason)\n    return (gm, graphs, op_count, ops_per_graph, break_reasons)",
    "docstring": "This function is a utility which processes a torch.fx.GraphModule and accumulates information about its ops, graph breaks, and other details. It is intended to be used by the ExplainWithBackend class and to provide details from Dynamo's graph capture. Parameters: gm (torch.fx.GraphModule): The GraphModule to be processed. graphs (list): A list that accumulates all the GraphModules processed. op_count (int): The total count of operations in all GraphModules processed so far. ops_per_graph (list): A list that accumulates the operations of each GraphModule. break_reasons (list): A list that accumulates the reasons for breaks in each GraphModule. Returns: tuple: A tuple containing the processed GraphModule, the updated lists of graphs, operations per graph, and break reasons, and the updated operation count.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\debugging.py",
    "ast_data": "FunctionDef name:_explain_graph_detail arg:gm arg:graphs arg:op_count arg:ops_per_graph arg:break_reasons arguments arg arg arg arg arg Call Assign Compare Call Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_variable",
    "source_code": "def _create_variable(self, *args, **kwargs):\n    with ops.name_scope('random_generator'):\n        kwargs['name'] = 'StateVar'\n        v = variables.Variable(*args, **kwargs)\n    if isinstance(v, sharded_variable.ShardedVariable):\n        raise ValueError(\"tf.random.Generator state is sharded, which is not allowed. When creating a tf.distribute.experimental.ParameterServerStrategy, please make sure that the `variable_partitioner` argument won't shard a small variable of shape [2] or [3]. Ways to avoid sharding small variables include setting `variable_partitioner` to None or to tf.distribute.experimental.partitioners.MinSizePartitioner with a large enough `min_shard_bytes`.\")\n    return v",
    "docstring": "Creates a variable. Args: *args: positional arguments passed along to variables.Variable. Returns: The created variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:_create_variable arg:self arguments arg arg arg With Call Assign Assign Call If Call Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "generic_laplace",
    "source_code": "@_ni_docstrings.docfiller\ndef generic_laplace(input, derivative2, output=None, mode='reflect', cval=0.0, extra_arguments=(), extra_keywords=None, *, axes=None):\n    if extra_keywords is None:\n        extra_keywords = {}\n    input = np.asarray(input)\n    output = _ni_support._get_output(output, input)\n    axes = _ni_support._check_axes(axes, input.ndim)\n    if len(axes) > 0:\n        modes = _ni_support._normalize_sequence(mode, len(axes))\n        derivative2(input, axes[0], output, modes[0], cval, *extra_arguments, **extra_keywords)\n        for ii in range(1, len(axes)):\n            tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval, *extra_arguments, **extra_keywords)\n            output += tmp\n    else:\n        output[...] = input[...]\n    return output",
    "docstring": "N-D Laplace filter using a provided second derivative function. Parameters ---------- %(input)s derivative2 : callable Callable with the following signature:: derivative2(input, axis, output, mode, cval, *extra_arguments, **extra_keywords) See , below. %(output)s %(mode_multiple)s %(cval)s %(extra_keywords)s %(extra_arguments)s axes : tuple of int or None The axes over which to apply the filter. If a tuple is provided, its length must match the number of axes. Returns ------- generic_laplace : ndarray Filtered array. Has the same shape as .",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:generic_laplace arg:input arg:derivative2 arg:output arg:mode arg:cval arg:extra_arguments arg:extra_keywords arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call If Compare Call Assign Call Call Call For Call Call Assign Call Assign Return return:yes"
  },
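A usage sketch: supplying ``ndimage.correlate1d`` with the standard ``[1, -2, 1]`` second-difference stencil as ``derivative2`` reproduces ``ndimage.laplace``.

```python
import numpy as np
from scipy import ndimage

def derivative2(input, axis, output, mode, cval):
    # Second finite difference along one axis; generic_laplace sums this
    # over every axis it processes.
    return ndimage.correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)

a = np.arange(25, dtype=float).reshape(5, 5)
assert np.allclose(ndimage.generic_laplace(a, derivative2), ndimage.laplace(a))
```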
  {
    "library": "matplotlib",
    "name": "add_image",
    "source_code": "def add_image(self, image):\n    _api.check_isinstance(mimage.AxesImage, image=image)\n    self._set_artist_props(image)\n    if not image.get_label():\n        image.set_label(f'_child{len(self._children)}')\n    self._children.append(image)\n    image._remove_method = self._children.remove\n    self.stale = True\n    return image",
    "docstring": "Add an to the Axes; return the image.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_image arg:self arg:image arguments arg arg Call Call If Call Call Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "def join(self):\n    self._cluster.join()",
    "docstring": "Blocks until all the scheduled functions have finished execution. If any previously scheduled function raises an error, will fail by raising any one of those errors, and clear the errors collected so far. If this happens, some of the previously scheduled functions may have not been executed. Users can call on the returned to inspect if they have executed, failed, or cancelled. If some that have been cancelled need to be rescheduled, users should call with the function again. When returns or raises, it guarantees that there is no function that is still being executed. Raises: Exception: one of the exceptions caught by the coordinator by any previously scheduled function since the last time an error was thrown or since the beginning of the program.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:join arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_key_down",
    "source_code": "def _on_key_down(self, event):\n    KeyEvent('key_press_event', self, self._get_key(event), *self._mpl_coords(), guiEvent=event)._process()\n    if self:\n        event.Skip()",
    "docstring": "Capture key press.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_key_down arg:self arg:event arguments arg arg Call Call Call Call If Call"
  },
  {
    "library": "pandas",
    "name": "_masked_arith_op",
    "source_code": "def _masked_arith_op(x: np.ndarray, y, op) -> np.ndarray:\n    xrav = x.ravel()\n    if isinstance(y, np.ndarray):\n        dtype = find_common_type([x.dtype, y.dtype])\n        result = np.empty(x.size, dtype=dtype)\n        if len(x) != len(y):\n            raise ValueError(x.shape, y.shape)\n        ymask = notna(y)\n        yrav = y.ravel()\n        mask = notna(xrav) & ymask.ravel()\n        if mask.any():\n            result[mask] = op(xrav[mask], yrav[mask])\n    else:\n        if not is_scalar(y):\n            raise TypeError(f'Cannot broadcast np.ndarray with operand of type {type(y)}')\n        result = np.empty(x.size, dtype=x.dtype)\n        mask = notna(xrav)\n        if op is pow:\n            mask = np.where(x == 1, False, mask)\n        elif op is roperator.rpow:\n            mask = np.where(y == 1, False, mask)\n        if mask.any():\n            result[mask] = op(xrav[mask], y)\n    np.putmask(result, ~mask, np.nan)\n    result = result.reshape(x.shape)\n    return result",
    "docstring": "If the given arithmetic operation fails, attempt it again on only the non-null elements of the input array(s). Parameters ---------- x : np.ndarray y : np.ndarray, Series, Index op : binary operator",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_masked_arith_op arg:x arg:y arg:op arguments arg arg arg Assign Call If Call Assign Call Assign Call If Compare Call Call Raise Call Assign Call Assign Call Assign Call Call If Call Assign Call If Call Raise Call Call Assign Call Assign Call If Compare Assign Call Compare If Compare Assign Call Compare If Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "capabilities",
    "source_code": "def capabilities(self):\n    return {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}",
    "docstring": "Return a dictionary of array API library capabilities. The resulting dictionary has the following keys: - **\"boolean indexing\"**: boolean indicating whether an array library supports boolean indexing. Always `` for CuPy. See for more details. See Also -------- __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- capabilities : dict A dictionary of array API library capabilities. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64}",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py",
    "ast_data": "FunctionDef name:capabilities arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_aux_axes",
    "source_code": "def get_aux_axes(self, tr=None, viewlim_mode='equal', axes_class=None, **kwargs):\n    if axes_class is None:\n        axes_class = self._base_axes_class\n    parasite_axes_class = parasite_axes_class_factory(axes_class)\n    ax2 = parasite_axes_class(self, tr, viewlim_mode=viewlim_mode, **kwargs)\n    self.parasites.append(ax2)\n    ax2._remove_method = self.parasites.remove\n    return ax2",
    "docstring": "Add a parasite axes to this host. Despite this method's name, this should actually be thought of as an `~matplotlib.transforms.Transform.Transform~matplotlib.axes.Axes~.axes.Axes` subclass that is instantiated. If None, the base class of the host axes is used. **kwargs Other parameters are forwarded to the parasite axes constructor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py",
    "ast_data": "FunctionDef name:get_aux_axes arg:self arg:tr arg:viewlim_mode arg:axes_class arguments arg arg arg arg arg If Compare Assign Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "infeed_dequeue_tuple",
    "source_code": "def infeed_dequeue_tuple(dtypes, shapes, name=None):\n    for dtype in dtypes:\n        if dtype not in _SUPPORTED_INFEED_DTYPES:\n            raise TypeError('{} is not a supported TPU infeed type. Supported types are: {}'.format(dtype, list(_SUPPORTED_INFEED_DTYPES)))\n    return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)",
    "docstring": "A placeholder op for values fed into the TPU simultaneously as a tuple. Args: dtypes: A list of s that has length . The element types of each element in . shapes: A list of shapes (each a or list of ). The shapes of each tensor in . name: A name for the operation (optional). Returns: A list of objects of type . A list of tensors that will be provided using the infeed mechanism. Raises: TypeError: If a type in 'dtypes` is not a supported infeed type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:infeed_dequeue_tuple arg:dtypes arg:shapes arg:name arguments arg arg arg For If Compare Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_start_callback",
    "source_code": "def register_start_callback(self, callback: Callable[[], None]) -> Callable[[], None]:\n    self.start_callbacks.append(callback)\n    return callback",
    "docstring": "Register a callback function to be called when the compilation starts. Args: - callback (Callable): The callback function to register.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\callback.py",
    "ast_data": "FunctionDef name:register_start_callback arg:self arg:callback arguments arg arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "create",
    "source_code": "def create(self, **kwargs):\n    reverse_one_to_one_fields = frozenset(kwargs).intersection(self.model._meta._reverse_one_to_one_field_names)\n    if reverse_one_to_one_fields:\n        raise ValueError('The following fields do not exist in this model: %s' % ', '.join(reverse_one_to_one_fields))\n    obj = self.model(**kwargs)\n    self._for_write = True\n    obj.save(force_insert=True, using=self.db)\n    return obj",
    "docstring": "Create a new object with the given kwargs, saving it to the database and returning the created object.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:create arg:self arguments arg arg Assign Call Call If Raise Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_max",
    "source_code": "def scatter_max(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_max(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Updates this variable with the max of and itself. Args: sparse_delta: to use as an argument of max with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_max arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "run",
    "source_code": "def run(self, feed_dict=None, session=None) -> None:\n    _run_using_default_session(self, feed_dict, self.graph, session)",
    "docstring": "Runs this operation in a . Calling this method will execute all preceding operations that produce the inputs needed for this operation. *N.B.* Before invoking , its graph must have been launched in a session, and either a default session must be available, or must be specified explicitly. Args: feed_dict: A dictionary that maps objects to feed values. See for a description of the valid feed values. session: (Optional.) The to be used to run to this operation. If none, the default session will be used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:run arg:self arg:feed_dict arg:session arguments arg arg arg Call"
  },
  {
    "library": "virtualenv",
    "name": "current",
    "source_code": "@classmethod\ndef current(cls, app_data=None):\n    if cls._current is None:\n        cls._current = cls.from_exe(sys.executable, app_data, raise_on_error=True, resolve_to_host=False)\n    return cls._current",
    "docstring": "This locates the current host interpreter information. This might be different than what we run into in case the host python has been upgraded from underneath us.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_info.py",
    "ast_data": "FunctionDef name:current arg:cls arg:app_data arguments arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "deprecate_nonkeyword_arguments",
    "source_code": "def deprecate_nonkeyword_arguments(version: str | None, allowed_args: list[str] | None=None, name: str | None=None) -> Callable[[F], F]:\n\n    def decorate(func):\n        old_sig = inspect.signature(func)\n        if allowed_args is not None:\n            allow_args = allowed_args\n        else:\n            allow_args = [p.name for p in old_sig.parameters.values() if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) and p.default is p.empty]\n        new_params = [p.replace(kind=p.KEYWORD_ONLY) if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) and p.name not in allow_args else p for p in old_sig.parameters.values()]\n        new_params.sort(key=lambda p: p.kind)\n        new_sig = old_sig.replace(parameters=new_params)\n        num_allow_args = len(allow_args)\n        msg = f'{future_version_msg(version)} all arguments of {name or func.__qualname__}{{arguments}} will be keyword-only.'\n\n        @wraps(func)\n        def wrapper(*args, **kwargs):\n            if len(args) > num_allow_args:\n                warnings.warn(msg.format(arguments=_format_argument_list(allow_args)), FutureWarning, stacklevel=find_stack_level())\n            return func(*args, **kwargs)\n        wrapper.__signature__ = new_sig\n        return wrapper\n    return decorate",
    "docstring": "Decorator to deprecate a use of non-keyword arguments of a function. Parameters ---------- version : str, optional The version in which positional arguments will become keyword-only. If None, then the warning message won't specify any particular version. allowed_args : list, optional In case of list, it must be the list of names of some first arguments of the decorated functions that are OK to be given as positional arguments. In case of None value, defaults to list of all arguments not having the default value. name : str, optional The specific name of the function to show in the warning message. If None, then the Qualified name of the function is used.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "FunctionDef name:deprecate_nonkeyword_arguments arg:version arg:allowed_args arg:name arguments arg arg arg FunctionDef name:decorate arg:func arguments arg Assign Call If Compare Assign Assign Call BoolOp Compare Compare Assign BoolOp Compare Compare Call Call Call arguments arg Assign Call Assign Call Assign Call BoolOp FunctionDef name:wrapper arguments arg arg If Compare Call Call Call Call Call Return return:yes Call Call Assign Return return:yes Return return:yes"
  },
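A sketch of the decorator in use; ``save`` is an illustrative function, and note the import path is the private module shown in the entry above.

```python
from pandas.util._decorators import deprecate_nonkeyword_arguments

@deprecate_nonkeyword_arguments(version="4.0", allowed_args=["path"])
def save(path, sep=",", encoding="utf-8"):
    return path, sep, encoding

save("out.csv", sep=";")   # fine: keyword use
save("out.csv", ";")       # FutureWarning: positional `sep` will be disallowed
```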
  {
    "library": "scipy",
    "name": "_getrow",
    "source_code": "def _getrow(self, i):\n    if self.ndim == 1:\n        raise ValueError('getrow not meaningful for a 1d array')\n    M = self.shape[0]\n    if i < 0:\n        i += M\n    if i < 0 or i >= M:\n        raise IndexError('index out of bounds')\n    row_selector = self._csr_container(([1], [[0], [i]]), shape=(1, M), dtype=self.dtype)\n    return row_selector @ self",
    "docstring": "Returns a copy of row i of the array, as a (1 x n) sparse array (row vector).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:_getrow arg:self arg:i arguments arg arg If Compare Raise Call Assign If Compare If BoolOp Compare Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_can_cache",
    "source_code": "@staticmethod\ndef _check_can_cache(gm: torch.fx.GraphModule) -> None:\n    for p in (config.post_grad_custom_pre_pass, config.post_grad_custom_post_pass):\n        if p and (not isinstance(p, CustomGraphPass) or not p.uuid()):\n            raise BypassFxGraphCache('Unsupported post grad custom pass')\n    if has_frozen_params(gm) and (not torch._utils_internal.justknobs_check('pytorch/inductor:allow_freezing_with_caching')):\n        raise BypassFxGraphCache('Skipping graph with frozen constants')\n    if config.aot_inductor.use_runtime_constant_folding:\n        raise BypassFxGraphCache(\"Runtime constant folding can introduce constants that aren't static across runs\")\n    from torch._inductor.compiler_bisector import CompilerBisector\n    if CompilerBisector.bisection_enabled:\n        log.debug('dont cache graph when bisect enabled')\n        raise BypassFxGraphCache\n    if FxGraphCache._get_shape_env() is None:\n        log.debug('fx graph cache no shape env')\n        raise BypassFxGraphCache('No shape env')\n    FxGraphCache._check_for_hop(gm)",
    "docstring": "Check some conditions that would preclude caching and raise BypassFxGraphCache to bypass in case caching is not possible.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:_check_can_cache arg:gm arguments arg For If BoolOp BoolOp Call Call Raise Call If BoolOp Call Call Raise Call If Raise Call If Call Raise If Compare Call Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "next_power_of_2",
    "source_code": "def next_power_of_2(n: int) -> int:\n    n -= 1\n    n |= n >> 1\n    n |= n >> 2\n    n |= n >> 4\n    n |= n >> 8\n    n |= n >> 16\n    n |= n >> 32\n    n += 1\n    return n",
    "docstring": "Return the smallest power of 2 greater than or equal to n",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py",
    "ast_data": "FunctionDef name:next_power_of_2 arg:n arguments arg Return return:yes"
  },
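The OR-shift cascade smears the highest set bit of ``n - 1`` into every lower bit, so the final increment rounds up to a power of two. A quick check, assuming the ``next_power_of_2`` defined above is in scope:

```python
# n = 17: n - 1 = 0b10000 -> shifts fill to 0b11111 -> +1 gives 0b100000 = 32.
values = [1, 2, 3, 16, 17, 1025]
assert [next_power_of_2(n) for n in values] == [1, 2, 4, 16, 32, 2048]
```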
  {
    "library": "cherrypy",
    "name": "Timer",
    "source_code": "class Timer(object):\n\n    def __init__(self, expiration):\n        self.expiration = expiration\n\n    @classmethod\n    def after(cls, elapsed):\n        return cls(datetime.datetime.now(datetime.timezone.utc) + elapsed)\n\n    def expired(self):\n        return datetime.datetime.now(datetime.timezone.utc) >= self.expiration",
    "docstring": "A simple timer that will indicate when an expiration time has passed.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\locking.py",
    "ast_data": "ClassDef name:Timer FunctionDef name:__init__ arg:self arg:expiration arguments arg arg Assign FunctionDef name:after arg:cls arg:elapsed arguments arg arg Return return:yes Call Call FunctionDef name:expired arg:self arguments arg Return return:yes Compare Call"
  },
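A brief usage sketch, importing from the file path given in the entry:

```python
import datetime
from cherrypy.lib.locking import Timer

# `after` anchors the deadline relative to the current UTC time.
timer = Timer.after(datetime.timedelta(seconds=1))
assert not timer.expired()   # just created, deadline is ~1s away
```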
  {
    "library": "numpy",
    "name": "_float_to_str",
    "source_code": "def _float_to_str(self, value):\n    return self.params['fmt'] % array(_fr0(value)[0], self.ftype)",
    "docstring": "Converts float to str. Parameters ---------- value : float value to be converted.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:_float_to_str arg:self arg:value arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "BboxTransform",
    "source_code": "class BboxTransform(Affine2DBase):\n    is_separable = True\n\n    def __init__(self, boxin, boxout, **kwargs):\n        _api.check_isinstance(BboxBase, boxin=boxin, boxout=boxout)\n        super().__init__(**kwargs)\n        self._boxin = boxin\n        self._boxout = boxout\n        self.set_children(boxin, boxout)\n        self._mtx = None\n        self._inverted = None\n    __str__ = _make_str_method('_boxin', '_boxout')\n\n    def get_matrix(self):\n        if self._invalid:\n            inl, inb, inw, inh = self._boxin.bounds\n            outl, outb, outw, outh = self._boxout.bounds\n            x_scale = outw / inw\n            y_scale = outh / inh\n            if DEBUG and (x_scale == 0 or y_scale == 0):\n                raise ValueError('Transforming from or to a singular bounding box')\n            self._mtx = np.array([[x_scale, 0.0, -inl * x_scale + outl], [0.0, y_scale, -inb * y_scale + outb], [0.0, 0.0, 1.0]], float)\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "linearly transforms points from one to another.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:BboxTransform Assign FunctionDef name:__init__ arg:self arg:boxin arg:boxout arguments arg arg arg arg Call Call Call Assign Assign Call Assign Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign Assign Assign Assign If BoolOp BoolOp Compare Compare Raise Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "isenumattribute",
    "source_code": "def isenumattribute(x: Any) -> TypeIs[enum.Enum]:\n    return isinstance(x, enum.Enum)",
    "docstring": "Check if the object is an enumeration attribute.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:isenumattribute arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "RegularGridInterpolatorSubclass",
    "source_code": "class RegularGridInterpolatorSubclass(Benchmark):\n    param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']\n    params = [[2, 3, 4], [10, 40, 200], [10, 100, 1000, 10000], [1, -1]]\n\n    def setup(self, ndim, max_coord_size, n_samples, flipped):\n        rng = np.random.default_rng(314159)\n        coord_sizes = [max_coord_size // 2 ** i for i in range(ndim)]\n        self.points = [np.sort(rng.random(size=s))[::flipped] for s in coord_sizes]\n        self.values = rng.random(size=coord_sizes)\n        bounds = [(p.min(), p.max()) for p in self.points]\n        xi = [rng.uniform(low, high, size=n_samples) for low, high in bounds]\n        self.xi = np.array(xi).T\n        self.interp = RegularGridInterpolatorValues(self.points, self.xi)\n\n    def time_rgi_setup_interpolator(self, ndim, max_coord_size, n_samples, flipped):\n        self.interp = RegularGridInterpolatorValues(self.points, self.xi)\n\n    def time_rgi(self, ndim, max_coord_size, n_samples, flipped):\n        self.interp(self.values)",
    "docstring": "Benchmark RegularGridInterpolator with method=\"linear\".",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:RegularGridInterpolatorSubclass Assign Assign FunctionDef name:setup arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call FunctionDef name:time_rgi_setup_interpolator arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Assign Call FunctionDef name:time_rgi arg:self arg:ndim arg:max_coord_size arg:n_samples arg:flipped arguments arg arg arg arg arg Call"
  },
  {
    "library": "numpy",
    "name": "_recursive_guard",
    "source_code": "def _recursive_guard(fillvalue='...'):\n\n    def decorating_function(f):\n        repr_running = set()\n\n        @functools.wraps(f)\n        def wrapper(self, *args, **kwargs):\n            key = (id(self), get_ident())\n            if key in repr_running:\n                return fillvalue\n            repr_running.add(key)\n            try:\n                return f(self, *args, **kwargs)\n            finally:\n                repr_running.discard(key)\n        return wrapper\n    return decorating_function",
    "docstring": "Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs Decorates a function such that if it calls itself with the same first argument, it returns instead of recursing. Largely copied from reprlib.recursive_repr",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:_recursive_guard arg:fillvalue arguments arg FunctionDef name:decorating_function arg:f arguments arg Assign Call FunctionDef name:wrapper arg:self arguments arg arg arg Assign Call Call If Compare Return return:yes Call Try Return return:yes Call Call Call Return return:yes Return return:yes"
  },
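A sketch of the guard on a self-referential structure, assuming the ``_recursive_guard`` above is in scope; ``Node`` is illustrative.

```python
class Node:
    def __init__(self):
        self.children = ()

@_recursive_guard()
def node_repr(node):
    # The wrapper keys on (id(node), thread id), so a cycle hits the
    # fillvalue instead of recursing forever.
    return "Node(" + ", ".join(node_repr(c) for c in node.children) + ")"

a = Node()
a.children = (a,)      # a contains itself
print(node_repr(a))    # Node(...)
```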
  {
    "library": "matplotlib",
    "name": "get_hatch_linewidth",
    "source_code": "def get_hatch_linewidth(self):\n    return self._hatch_linewidth",
    "docstring": "Return the hatch linewidth.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_hatch_linewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fsolve",
    "source_code": "def fsolve(func, x0, args=(), fprime=None, full_output=0, col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, epsfcn=None, factor=100, diag=None):\n\n    def _wrapped_func(*fargs):\n        _wrapped_func.nfev += 1\n        return func(*fargs)\n    _wrapped_func.nfev = 0\n    options = {'col_deriv': col_deriv, 'xtol': xtol, 'maxfev': maxfev, 'band': band, 'eps': epsfcn, 'factor': factor, 'diag': diag}\n    res = _root_hybr(_wrapped_func, x0, args, jac=fprime, **options)\n    res.nfev = _wrapped_func.nfev\n    if full_output:\n        x = res['x']\n        info = {k: res.get(k) for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res}\n        info['fvec'] = res['fun']\n        return (x, info, res['status'], res['message'])\n    else:\n        status = res['status']\n        msg = res['message']\n        if status == 0:\n            raise TypeError(msg)\n        elif status == 1:\n            pass\n        elif status in [2, 3, 4, 5]:\n            warnings.warn(msg, RuntimeWarning, stacklevel=2)\n        else:\n            raise TypeError(msg)\n        return res['x']",
    "docstring": "Find the roots of a function. Return the roots of the (non-linear) equations defined by `funcfuncxtolx0epsfcnmesgmesg`. >>> import numpy as np >>> from scipy.optimize import fsolve >>> def func(x): ... return [x[0] * np.cos(x[1]) - 4, ... x[1] * x[0] - x[1] - 5] >>> root = fsolve(func, [1, 1]) >>> root array([6.50409711, 0.90841421]) >>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0. array([ True, True])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_minpack_py.py",
    "ast_data": "FunctionDef name:fsolve arg:func arg:x0 arg:args arg:fprime arg:full_output arg:col_deriv arg:xtol arg:maxfev arg:band arg:epsfcn arg:factor arg:diag arguments arg arg arg arg arg arg arg arg arg arg arg arg FunctionDef name:_wrapped_func arguments arg Return return:yes Call Assign Assign Assign Call Assign If Assign Assign Call Compare Assign Return return:yes Assign Assign If Compare Raise Call If Compare If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "Session",
    "source_code": "class Session(AbstractBaseSession):\n    objects = SessionManager()\n\n    @classmethod\n    def get_session_store_class(cls):\n        from django.contrib.sessions.backends.db import SessionStore\n        return SessionStore\n\n    class Meta(AbstractBaseSession.Meta):\n        db_table = 'django_session'",
    "docstring": "Django provides full support for anonymous sessions. The session framework lets you store and retrieve arbitrary data on a per-site-visitor basis. It stores data on the server side and abstracts the sending and receiving of cookies. Cookies contain a session ID -- not the data itself. The Django sessions framework is entirely cookie-based. It does not fall back to putting session IDs in URLs. This is an intentional design decision. Not only does that behavior make URLs ugly, it makes your site vulnerable to session-ID theft via the \"Referer\" header. For complete documentation on using Sessions in your code, consult the sessions documentation that is shipped with Django (also available on the Django web site).",
    "type": "class",
    "file_path": "django\\django\\contrib\\sessions\\models.py",
    "ast_data": "ClassDef name:Session Assign Call FunctionDef name:get_session_store_class arg:cls arguments arg Return return:yes ClassDef name:Meta Assign"
  },
  {
    "library": "scrapy",
    "name": "copy",
    "source_code": "def copy(self) -> Self:\n    return copy.deepcopy(self)",
    "docstring": "Make a deep copy of current settings. This method returns a new instance of the :class: class, populated with the same values and their priorities. Modifications to the new object won't be reflected on the original settings.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_ZerosLikeV2",
    "source_code": "def _ZerosLikeV2(op, index):\n    val = op.outputs[index]\n    if val.dtype == dtypes.resource:\n        return array_ops.zeros(gen_resource_variable_ops.variable_shape(val), dtype=default_gradient.get_zeros_dtype(val))\n    if isinstance(val.op.graph, control_flow_v2_func_graphs.WhileBodyFuncGraph) and val.dtype != dtypes.variant:\n        if val.shape.is_fully_defined():\n            return _ConstantZeros(val.shape.dims, val.dtype)\n        else:\n            zeros_shape = array_ops.shape_internal(val, optimize=False)\n            return array_ops.zeros(zeros_shape, val.dtype)\n    else:\n        return array_ops.zeros_like(val, optimize=False)",
    "docstring": "Branch of ZerosLike for TF2.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:_ZerosLikeV2 arg:op arg:index arguments arg arg Assign If Compare Return return:yes Call Call Call If BoolOp Call Compare If Call Return return:yes Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, images: Tensor) -> tuple[Tensor, Tensor]:\n    feats = self.backbone(images)\n    feats_buf = self.encoder(feats)\n    logits, boxes = self.decoder(feats_buf)\n    return (logits, boxes)",
    "docstring": "Detect objects in an image. Args: images: images to be detected. Shape :math:. Returns: - **logits** - Tensor of shape :math:, where :math: is the number of queries, :math: is the number of classes. - **boxes** - Tensor of shape :math:, where :math: is the number of queries.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\rt_detr\\model.py",
    "ast_data": "FunctionDef name:forward arg:self arg:images arguments arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_params",
    "source_code": "@classmethod\ndef from_params(cls, wi, wh, bi=None, bh=None, split_gates=False):\n    assert (bi is None) == (bh is None)\n    input_size = wi.shape[1]\n    hidden_size = wh.shape[1]\n    cell = cls(input_dim=input_size, hidden_dim=hidden_size, bias=bi is not None, split_gates=split_gates)\n    if not split_gates:\n        cell.igates.weight = torch.nn.Parameter(wi)\n        if bi is not None:\n            cell.igates.bias = torch.nn.Parameter(bi)\n        cell.hgates.weight = torch.nn.Parameter(wh)\n        if bh is not None:\n            cell.hgates.bias = torch.nn.Parameter(bh)\n    else:\n        for w, b, gates in zip([wi, wh], [bi, bh], [cell.igates, cell.hgates]):\n            for w_chunk, gate in zip(w.chunk(4, dim=0), gates.values()):\n                gate.weight = torch.nn.Parameter(w_chunk)\n            if b is not None:\n                for b_chunk, gate in zip(b.chunk(4, dim=0), gates.values()):\n                    gate.bias = torch.nn.Parameter(b_chunk)\n    return cell",
    "docstring": "Uses the weights and biases to create a new LSTM cell. Args: wi, wh: Weights for the input and hidden layers bi, bh: Biases for the input and hidden layers",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\quantizable\\modules\\rnn.py",
    "ast_data": "FunctionDef name:from_params arg:cls arg:wi arg:wh arg:bi arg:bh arg:split_gates arguments arg arg arg arg arg arg Compare Compare Compare Assign Assign Assign Call Compare If Assign Call If Compare Assign Call Assign Call If Compare Assign Call For Call For Call Call Call Assign Call If Compare For Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "urlsafe_base64_encode",
    "source_code": "def urlsafe_base64_encode(s):\n    return base64.urlsafe_b64encode(s).rstrip(b'\\n=').decode('ascii')",
    "docstring": "Encode a bytestring to a base64 string for use in URLs. Strip any trailing equal signs.",
    "type": "function",
    "file_path": "django\\django\\utils\\http.py",
    "ast_data": "FunctionDef name:urlsafe_base64_encode arg:s arguments arg Return return:yes Call Call Call"
  },
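A quick check of the padding behavior: two input bytes encode to four characters with one ``=`` pad, which the ``rstrip`` removes.

```python
import base64

def urlsafe_base64_encode(s):
    # Strip padding '=' so the token is URL-safe without percent-encoding.
    return base64.urlsafe_b64encode(s).rstrip(b"\n=").decode("ascii")

assert base64.urlsafe_b64encode(b"\xfb\xef") == b"--8="
assert urlsafe_base64_encode(b"\xfb\xef") == "--8"
```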
  {
    "library": "tensorflow",
    "name": "reshape",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef reshape(x, shape):\n    return array_ops.reshape(x, shape)",
    "docstring": "Reshapes a tensor to the specified shape. Args: x: Tensor or variable. shape: Target shape tuple. Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) >>> a >>> tf.keras.backend.reshape(a, shape=(2, 6))",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:reshape arg:x arg:shape arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply_checkpoint_execution_state_in_allocator",
    "source_code": "def apply_checkpoint_execution_state_in_allocator(self) -> None:\n    assert isinstance(self.current_node, CUDAGraphNode)\n    self.debug_checkpointing_counter += 1\n    log.debug('Checkpointing cuda caching allocator state. Number of checkpoints %d', self.debug_checkpointing_counter)\n    state = self.current_node.checkpointed_caching_state\n    device = self.current_node.device\n    assert state is not None and device is not None\n    stale_storages: list[int] = []\n    self.current_node.remove_path_cached_tensors()\n    live_storages_wrappers = list(self.current_node.path_live_weakrefs())\n    live_storages_weak_refs: list[int] = [t() for t in live_storages_wrappers]\n    ptrs_to_deallocate = self.current_node.data_ptrs_dead_since_invocation()\n    torch._C._cuda_setCheckpointPoolState(device, state, stale_storages, live_storages_weak_refs)\n    for ptr in OrderedSet(ptrs_to_deallocate):\n        torch._C._cuda_cudaCachingAllocator_raw_delete(ptr)\n    if config.triton.slow_path_cudagraph_asserts:\n        check_memory_pool(self.device_index, self.cuda_graphs_thread_pool, live_storages_wrappers)\n        for wrapper in live_storages_wrappers:\n            storage_ptr = wrapper()\n            assert storage_ptr is not None\n            assert torch._C._has_Standard_Deleter(storage_ptr)\n            assert wrapper.data_ptr() not in ptrs_to_deallocate",
    "docstring": "Checkpoint the current execution state in the caching allocator so that additional cudagraph recordings can be made respecting existent live storages.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:apply_checkpoint_execution_state_in_allocator arg:self arguments arg Call Call Assign Assign BoolOp Compare Compare Call Assign Call Call Call Assign Call Call For Call Call If Call For Assign Call Compare Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "log_every_n",
    "source_code": "@tf_export(v1=['logging.log_every_n'])\ndef log_every_n(level, msg, n, *args):\n    count = _GetNextLogCountPerToken(_GetFileAndLine())\n    log_if(level, msg, not count % n, *args)",
    "docstring": "Log 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:log_every_n arg:level arg:msg arg:n arguments arg arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_compatible_with",
    "source_code": "def is_compatible_with(self, spec_or_value):\n    if not isinstance(spec_or_value, TypeSpec):\n        spec_or_value = type_spec_from_value(spec_or_value)\n    if type(self) is not type(spec_or_value):\n        return False\n    return self.__is_compatible(self._serialize(), spec_or_value._serialize())",
    "docstring": "Returns true if is compatible with this TypeSpec. Prefer using \"is_subtype_of\" and \"most_specific_common_supertype\" wherever possible. Args: spec_or_value: A TypeSpec or TypeSpec associated value to compare against.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:is_compatible_with arg:self arg:spec_or_value arguments arg arg If Call Assign Call If Compare Call Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "referenced_base_fields",
    "source_code": "@cached_property\ndef referenced_base_fields(self):\n    from django.db.models.sql import query\n    return {child.split(LOOKUP_SEP, 1)[0] for child in query.get_children_from_q(self)}",
    "docstring": "Retrieve all base fields referenced directly or through F expressions excluding any fields referenced through joins.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:referenced_base_fields arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "symeig",
    "source_code": "def symeig(A: Tensor, largest: Optional[bool]=False) -> tuple[Tensor, Tensor]:\n    if largest is None:\n        largest = False\n    E, Z = torch.linalg.eigh(A, UPLO='U')\n    if largest:\n        E = torch.flip(E, dims=(-1,))\n        Z = torch.flip(Z, dims=(-1,))\n    return (E, Z)",
    "docstring": "Return eigenpairs of A with specified ordering.",
    "type": "function",
    "file_path": "pytorch\\torch\\_linalg_utils.py",
    "ast_data": "FunctionDef name:symeig arg:A arg:largest arguments arg arg If Compare Assign Assign Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "handle_tensor",
    "source_code": "def handle_tensor(x: Any) -> Any:\n    if isinstance(x, torch.Tensor):\n        return TensorMetadataHolder(_extract_tensor_metadata(x), x.device)\n    else:\n        return x",
    "docstring": "Pickle FakeTensor will result in error: AttributeError: Can't pickle local object 'WeakValueDictionary.__init__..remove' Convert all Tensor to metadata. This may also makes pickle faster.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\debug.py",
    "ast_data": "FunctionDef name:handle_tensor arg:x arguments arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "LateParamPageHandler",
    "source_code": "class LateParamPageHandler(PageHandler):\n\n    @property\n    def kwargs(self):\n        kwargs = cherrypy.serving.request.params.copy()\n        if self._kwargs:\n            kwargs.update(self._kwargs)\n        return kwargs\n\n    @kwargs.setter\n    def kwargs(self, kwargs):\n        cherrypy.serving.request.kwargs = kwargs\n        self._kwargs = kwargs",
    "docstring": "Page handler callable with delayed request parameters binding. When passing `` to the page handler, we do not want to capture that dict too early; we want to give tools like the decoding tool a chance to modify the params dict in-between the lookup of the handler and the actual calling of the handler. This subclass takes that into account, and allows request.params to be 'bound late' (it's more complicated than that, but that's the effect).",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "ClassDef name:LateParamPageHandler FunctionDef name:kwargs arg:self arguments arg Assign Call If Call Return return:yes FunctionDef name:kwargs arg:self arg:kwargs arguments arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "ediff1d",
    "source_code": "def ediff1d(arr, to_end=None, to_begin=None):\n    arr = ma.asanyarray(arr).flat\n    ed = arr[1:] - arr[:-1]\n    arrays = [ed]\n    if to_begin is not None:\n        arrays.insert(0, to_begin)\n    if to_end is not None:\n        arrays.append(to_end)\n    if len(arrays) != 1:\n        ed = hstack(arrays)\n    return ed",
    "docstring": "Compute the differences between consecutive elements of an array. This function is the equivalent of that takes masked values into account, see for details. See Also -------- numpy.ediff1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> arr = np.ma.array([1, 2, 4, 7, 0]) >>> np.ma.ediff1d(arr) masked_array(data=[ 1, 2, 3, -7], mask=False, fill_value=999999)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "FunctionDef name:ediff1d arg:arr arg:to_end arg:to_begin arguments arg arg arg Assign Call Assign Assign If Compare Call If Compare Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_yaml_from_profiles",
    "source_code": "def generate_yaml_from_profiles(op_profiles: dict[str, set[OpProfile]]) -> str:\n    import yaml\n    from torch._export.serde.serialize import _TORCH_TO_SERIALIZE_DTYPE, _TORCH_TO_SERIALIZE_LAYOUT\n\n    def serialize_tensor_metadata(t: TensorMetadata) -> dict:\n        return {'rank': t.rank, 'dtype': _TORCH_TO_SERIALIZE_DTYPE[t.dtype].value, 'device': str(t.device), 'layout': _TORCH_TO_SERIALIZE_LAYOUT[t.layout].value}\n\n    def serialize_op_profile(op: OpProfile) -> dict:\n        return {'args_profile': [serialize_tensor_metadata(arg) for arg in op.args_profile if arg is not None], 'out_profile': serialize_tensor_metadata(op.out_profile) if isinstance(op.out_profile, TensorMetadata) else [serialize_tensor_metadata(out) for out in op.out_profile]}\n    serialized_data = {operator: [serialize_op_profile(profile) for profile in profiles] for operator, profiles in op_profiles.items()}\n    return yaml.dump({'torch_version': get_torch_version(), 'operators': serialized_data}, sort_keys=False)",
    "docstring": "Generates a yaml string from the given operator profiles which can be saved to a file. The yaml string can be loaded back into an operator profile structure using .",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\fake_profile.py",
    "ast_data": "FunctionDef name:generate_yaml_from_profiles arg:op_profiles arguments arg FunctionDef name:serialize_tensor_metadata arg:t arguments arg Return return:yes Call FunctionDef name:serialize_op_profile arg:op arguments arg Return return:yes Call Compare Call Call Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_inject_kwargs",
    "source_code": "def _inject_kwargs(self, func, kws, params):\n    func_params = signature(func).parameters\n    for key, val in params.items():\n        if key in func_params:\n            kws.setdefault(key, val)",
    "docstring": "Add params to kws if they are accepted by func.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_inject_kwargs arg:self arg:func arg:kws arg:params arguments arg arg arg arg Assign Call For Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "release_buffer",
    "source_code": "def release_buffer(self, name):\n    assert name in self.local_buffers\n    return f'_{name}.release()'",
    "docstring": "Codegen the code to release the ownership of a local buffer to others",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py",
    "ast_data": "FunctionDef name:release_buffer arg:self arg:name arguments arg arg Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prepare_sample_weight_modes",
    "source_code": "def prepare_sample_weight_modes(training_endpoints, sample_weight_mode):\n    if isinstance(sample_weight_mode, collections.abc.Mapping):\n        generic_utils.check_for_unexpected_keys('sample_weight_mode', sample_weight_mode, [e.output_name for e in training_endpoints])\n        for end_point in training_endpoints:\n            if not end_point.should_skip_target_weights():\n                if end_point.output_name not in sample_weight_mode:\n                    raise ValueError('Output ' + end_point.output_name + 'missing from `_sample_weight_modes` dictionary')\n                else:\n                    end_point.sample_weight_mode = sample_weight_mode.get(end_point.output_name)\n    elif isinstance(sample_weight_mode, (list, tuple)):\n        if len(sample_weight_mode) != len(training_endpoints):\n            raise ValueError('When passing a list as sample_weight_mode, it should have one entry per model output. The model has ' + str(len(training_endpoints)) + ' outputs, but you passed ' + str(len(sample_weight_mode)) + '_sample_weight_modes.')\n        for mode, endpoint in zip(sample_weight_mode, training_endpoints):\n            if not endpoint.should_skip_target_weights():\n                endpoint.sample_weight_mode = mode\n    else:\n        for endpoint in training_endpoints:\n            if not endpoint.should_skip_target_weights():\n                endpoint.sample_weight_mode = sample_weight_mode",
    "docstring": "Prepares sample weight modes for the model. Args: training_endpoints: List of model _TrainingEndpoints. sample_weight_mode: sample weight mode user input passed from compile API. Raises: ValueError: In case of invalid input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:prepare_sample_weight_modes arg:training_endpoints arg:sample_weight_mode arguments arg arg If Call Call For If Call If Compare Raise Call Assign Call If Call If Compare Call Call Raise Call Call Call Call Call For Call If Call Assign For If Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "HammerTransform",
    "source_code": "class HammerTransform(_GeoTransform):\n\n    def transform_non_affine(self, values):\n        longitude, latitude = values.T\n        half_long = longitude / 2.0\n        cos_latitude = np.cos(latitude)\n        sqrt2 = np.sqrt(2.0)\n        alpha = np.sqrt(1.0 + cos_latitude * np.cos(half_long))\n        x = 2.0 * sqrt2 * (cos_latitude * np.sin(half_long)) / alpha\n        y = sqrt2 * np.sin(latitude) / alpha\n        return np.column_stack([x, y])\n\n    def inverted(self):\n        return HammerAxes.InvertedHammerTransform(self._resolution)",
    "docstring": "The base Hammer transform.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "ClassDef name:HammerTransform FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Assign Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call FunctionDef name:inverted arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_partitioner",
    "source_code": "def _partitioner(shape, dtype):\n    if not isinstance(shape, tensor_shape.TensorShape):\n        raise ValueError(f'shape is not a TensorShape: {shape}')\n    if not shape.is_fully_defined():\n        raise ValueError(f'shape is not fully defined: {shape}')\n    dtype = dtypes.as_dtype(dtype)\n    if dtype.base_dtype == dtypes.string:\n        element_size = bytes_per_string_element\n    else:\n        element_size = dtype.size\n    partitions = [1] * shape.ndims\n    bytes_per_slice = 1.0 * (shape.num_elements() / shape.dims[axis].value) * element_size\n    slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n    axis_shards = int(math.ceil(1.0 * shape.dims[axis].value / slices_per_shard))\n    if max_shards:\n        axis_shards = min(max_shards, axis_shards)\n    partitions[axis] = axis_shards\n    return partitions",
    "docstring": "Partitioner that partitions shards to have max_shard_bytes total size. Args: shape: A . dtype: A . Returns: A tuple representing how much to slice each axis in shape. Raises: ValueError: If shape is not a fully defined or dtype is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\partitioned_variables.py",
    "ast_data": "FunctionDef name:_partitioner arg:shape arg:dtype arguments arg arg If Call Raise Call If Call Raise Call Assign Call If Compare Assign Assign Assign Assign Call Assign Call Call Assign Call Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_weakly_lesser_type",
    "source_code": "def is_weakly_lesser_type(a: type, b: type) -> bool:\n    a, b = (_maybe_get_pytype(a), _maybe_get_pytype(b))\n    if a not in _ordered_types or b not in _ordered_types:\n        raise RuntimeError(f'Expected builtin numeric types, found {a}, {b}')\n    for typ in _ordered_types:\n        if a == typ:\n            return True\n        if b == typ:\n            return False\n    raise RuntimeError('Unexpected termination!')",
    "docstring": "Compares two types, a and b, returning True if a is weakly \"less\" than b. The comparison is determined by the following type ordering: bool, int, float, complex.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:is_weakly_lesser_type arg:a arg:b arguments arg arg Assign Call Call If BoolOp Compare Compare Raise Call For If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_or_create_logger",
    "source_code": "def _get_or_create_logger(destination: str='null') -> logging.Logger:\n    global _events_loggers\n    if destination not in _events_loggers:\n        _events_logger = logging.getLogger(f'torchelastic-events-{destination}')\n        _events_logger.setLevel(os.environ.get('LOGLEVEL', 'INFO'))\n        _events_logger.propagate = False\n        logging_handler = get_logging_handler(destination)\n        _events_logger.addHandler(logging_handler)\n        _events_loggers[destination] = _events_logger\n    return _events_loggers[destination]",
    "docstring": "Construct python logger based on the destination type or extends if provided. Available destination could be found in `` module",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\events\\__init__.py",
    "ast_data": "FunctionDef name:_get_or_create_logger arg:destination arguments arg If Compare Assign Call Call Call Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_keras_serializable",
    "source_code": "def register_keras_serializable(package='Custom', name=None):\n\n    def decorator(arg):\n        class_name = name if name is not None else arg.__name__\n        registered_name = package + '>' + class_name\n        if tf_inspect.isclass(arg) and (not hasattr(arg, 'get_config')):\n            raise ValueError('Cannot register a class that does not have a get_config() method.')\n        if registered_name in _GLOBAL_CUSTOM_OBJECTS:\n            raise ValueError('%s has already been registered to %s' % (registered_name, _GLOBAL_CUSTOM_OBJECTS[registered_name]))\n        if arg in _GLOBAL_CUSTOM_NAMES:\n            raise ValueError('%s has already been registered to %s' % (arg, _GLOBAL_CUSTOM_NAMES[arg]))\n        _GLOBAL_CUSTOM_OBJECTS[registered_name] = arg\n        _GLOBAL_CUSTOM_NAMES[arg] = registered_name\n        return arg\n    return decorator",
    "docstring": "Registers an object with the Keras serialization framework. This decorator injects the decorated class or function into the Keras custom object dictionary, so that it can be serialized and deserialized without needing an entry in the user-provided custom object dict. It also injects a function that Keras will call to get the object's serializable string key. Note that to be serialized and deserialized, classes must implement the method. Functions do not have this requirement. The object will be registered under the key 'package>name' where , defaults to the object name if not passed. Args: package: The package that this class belongs to. name: The name to serialize this class under in this package. If None, the class' name will be used. Returns: A decorator that registers the decorated class with the passed names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:register_keras_serializable arg:package arg:name arguments arg arg FunctionDef name:decorator arg:arg arguments arg Assign Compare Assign If BoolOp Call Call Raise Call If Compare Raise Call If Compare Raise Call Assign Assign Return return:yes Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "anonymous",
    "source_code": "def anonymous(self):\n    pass",
    "docstring": "Provide a temporary user name for anonymous users.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:anonymous arg:self arguments arg"
  },
  {
    "library": "scrapy",
    "name": "_new_stream",
    "source_code": "def _new_stream(self, request: Request, spider: Spider) -> Stream:\n    stream = Stream(stream_id=next(self._stream_id_generator), request=request, protocol=self, download_maxsize=getattr(spider, 'download_maxsize', self.metadata['default_download_maxsize']), download_warnsize=getattr(spider, 'download_warnsize', self.metadata['default_download_warnsize']))\n    self.streams[stream.stream_id] = stream\n    return stream",
    "docstring": "Instantiates a new Stream object",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:_new_stream arg:self arg:request arg:spider arguments arg arg arg Assign Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "matrix",
    "source_code": "def matrix(self) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Return the camera matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:matrix arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "login",
    "source_code": "@method_decorator(never_cache)\n@login_not_required\ndef login(self, request, extra_context=None):\n    if request.method == 'GET' and self.has_permission(request):\n        index_path = reverse('admin:index', current_app=self.name)\n        return HttpResponseRedirect(index_path)\n    from django.contrib.admin.forms import AdminAuthenticationForm\n    from django.contrib.auth.views import LoginView\n    context = {**self.each_context(request), 'title': _('Log in'), 'subtitle': None, 'app_path': request.get_full_path(), 'username': request.user.get_username()}\n    if REDIRECT_FIELD_NAME not in request.GET and REDIRECT_FIELD_NAME not in request.POST:\n        context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)\n    context.update(extra_context or {})\n    defaults = {'extra_context': context, 'authentication_form': self.login_form or AdminAuthenticationForm, 'template_name': self.login_template or 'admin/login.html'}\n    request.current_app = self.name\n    return LoginView.as_view(**defaults)(request)",
    "docstring": "Display the login form for the given HttpRequest.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:login arg:self arg:request arg:extra_context arguments arg arg arg If BoolOp Compare Call Assign Call Return return:yes Call Assign Call Call Call Call If BoolOp Compare Compare Assign Call Call BoolOp Assign BoolOp BoolOp Assign Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_np_conv_ok",
    "source_code": "def _np_conv_ok(volume, kernel, mode, xp):\n    if volume.ndim == kernel.ndim == 1:\n        if mode in ('full', 'valid'):\n            return True\n        elif mode == 'same':\n            return xp_size(volume) >= xp_size(kernel)\n    else:\n        return False",
    "docstring": "See if numpy supports convolution of and (i.e. both are 1D ndarrays and of the appropriate shape). NumPy's 'same' mode uses the size of the larger input, while SciPy's uses the size of the first input. Invalid mode strings will return False and be caught by the calling func.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_np_conv_ok arg:volume arg:kernel arg:mode arg:xp arguments arg arg arg arg If Compare If Compare Return return:yes If Compare Return return:yes Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sortlevel",
    "source_code": "def sortlevel(self, level=None, ascending: bool | list[bool]=True, sort_remaining=None, na_position: NaPosition='first') -> tuple[Self, np.ndarray]:\n    if not isinstance(ascending, (list, bool)):\n        raise TypeError('ascending must be a single bool value ora list of bool values of length 1')\n    if isinstance(ascending, list):\n        if len(ascending) != 1:\n            raise TypeError('ascending must be a list of bool values of length 1')\n        ascending = ascending[0]\n    if not isinstance(ascending, bool):\n        raise TypeError('ascending must be a bool value')\n    return self.sort_values(return_indexer=True, ascending=ascending, na_position=na_position)",
    "docstring": "For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order na_position : {'first' or 'last'}, default 'first' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 2.1.0 level, sort_remaining are compat parameters Returns ------- Index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:sortlevel arg:self arg:level arg:ascending arg:sort_remaining arg:na_position arguments arg arg arg arg arg If Call Raise Call If Call If Compare Call Raise Call Assign If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_single_color",
    "source_code": "def _is_single_color(color: Color | Collection[Color]) -> bool:\n    if isinstance(color, str) and _is_single_string_color(color):\n        return True\n    if _is_floats_color(color):\n        return True\n    return False",
    "docstring": "Check if is a single color, not a sequence of colors. Single color is of these kinds: - Named color \"red\", \"C0\", \"firebrick\" - Alias \"g\" - Sequence of floats, such as (0.1, 0.2, 0.3) or (0.1, 0.2, 0.3, 0.4). See Also -------- _is_single_string_color",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_is_single_color arg:color arguments arg If BoolOp Call Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "reset_defaults",
    "source_code": "def reset_defaults():\n    mpl.rcParams.update(mpl.rcParamsDefault)",
    "docstring": "Restore all RC params to default settings.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "FunctionDef name:reset_defaults arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "maybe_do_strip",
    "source_code": "def maybe_do_strip(node: node_def_pb2.NodeDef) -> None:\n    if node.op == 'Assert' or node.op == 'PrintV2':\n        node.op = 'NoOp'\n        erase_regular_node_attributes(node)\n        new_inputs = []\n        for inp in node.input:\n            if not is_control_input(inp):\n                new_inputs.append(as_control_dep(inp))\n            else:\n                new_inputs.append(inp)\n        node.ClearField('input')\n        node.input.extend(new_inputs)\n    elif node.op == 'CheckNumerics' or node.op == 'Print':\n        node.op = 'Identity'\n        prune_all_non_t_attributes(node)\n        for i in range(1, len(node.input)):\n            if not is_control_input(node.input[i]):\n                node.input[i] = as_control_dep(node.input[i])",
    "docstring": "Strips the graph from Assert and CheckNumerics ops. For Assert ops, this function also rewrites all of the inputs to the nodes that were transformed by making them into control dependencies. It also removes all of the regular node attributes, that is all node attributes that do not start with . For CheckNumerics ops, this function turns the op into an Identity op, which will be pruned later (according to the original implementation in grappler's . Then, since Identity ops only take one input, it leaves the first input as is while transforming the other ones into control dependencies. Args: node: The node to potentally strip.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:maybe_do_strip arg:node arguments arg If BoolOp Compare Compare Assign Call Assign For If Call Call Call Call Call Call If BoolOp Compare Compare Assign Call For Call Call If Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "replica_only_strategy",
    "source_code": "@register_op_strategy(aten._local_scalar_dense.default)\ndef replica_only_strategy(op_schema: OpSchema) -> StrategyType:\n    input_strategy = op_schema.args_schema[0]\n    assert isinstance(input_strategy, OpStrategy)\n    mesh = input_strategy.mesh\n    replicate_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim))\n    return OpStrategy([PlacementStrategy(replicate_spec)])",
    "docstring": "Only allow replication on the input/output.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_tensor_ops.py",
    "ast_data": "FunctionDef name:replica_only_strategy arg:op_schema arguments arg Assign Call Assign Assign Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "bazel_command",
    "source_code": "def bazel_command(self, subcommand: str='test', extra_options: Tuple[str, ...]=()) -> List[str]:\n    options = _dict_to_cli_options(self.options)\n    configs = [f'--config={config}' for config in self.configs]\n    build_tag_filters = f'--build_tag_filters={','.join(self.build_tag_filters)}'\n    test_tag_filters = f'--test_tag_filters={','.join(self.test_tag_filters)}'\n    action_env = [f'--action_env={k}={v}' for k, v in self.action_env.items()]\n    test_env = [f'--test_env={k}={v}' for k, v in self.test_env.items()]\n    repo_env = [f'--repo_env={k}={v}' for k, v in self.repo_env.items()]\n    override_repository = [f'--override_repository={k}={v}' for k, v in self.override_repository.items()]\n    tag_filters = [build_tag_filters, test_tag_filters]\n    all_options = tag_filters + configs + action_env + test_env + repo_env + override_repository + options + list(extra_options)\n    return ['bazel', subcommand, *all_options, '--', *self.target_patterns]",
    "docstring": "Returns a bazel test command for this build. Args: subcommand: The subcommand to give to bazel. by default. extra_options: Extra options. For now just used to pass in . Returns: List of command line arguments",
    "type": "method",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\ci\\build.py",
    "ast_data": "FunctionDef name:bazel_command arg:self arg:subcommand arg:extra_options arguments arg arg arg Assign Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "initialize_communicator",
    "source_code": "def initialize_communicator(group_key, rank, group_size, communication_hint='auto', timeout_seconds=0):\n    return gen_collective_ops.collective_initialize_communicator(group_key=group_key, rank=rank, group_size=group_size, communication_hint=communication_hint, timeout_seconds=timeout_seconds)",
    "docstring": "Initializes a collective communicator. This creates a collective communicator, which represents membership to a collective group identified by the group_key. It should be called once per member of the group, and each member needs to be on a different device. It blocks until all members of the group run this op. Communicators of a group can only be initialized once. Trying to initialize communicators for an existing group key will result in an error. Args: group_key: an int32 identifying the group. rank: an specifying the rank of this device in the group. If specified, the rank is required to be unique in the group. group_size: an int32 . The size of the group. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout_seconds: If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: A resource .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py",
    "ast_data": "FunctionDef name:initialize_communicator arg:group_key arg:rank arg:group_size arg:communication_hint arg:timeout_seconds arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "write_documents",
    "source_code": "def write_documents(self, docnames: Set[str]) -> None:\n    sorted_docnames = sorted(docnames)\n    if self.parallel_ok:\n        self._write_parallel(sorted_docnames, nproc=self.app.parallel - 1)\n    else:\n        self._write_serial(sorted_docnames)",
    "docstring": "Write all documents in *docnames*. This method can be overridden if a builder does not create output files for each document.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:write_documents arg:self arg:docnames arguments arg arg Assign Call If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "NetworkSavedModelSaver",
    "source_code": "class NetworkSavedModelSaver(model_serialization.ModelSavedModelSaver):\n\n    @property\n    def object_identifier(self):\n        return constants.NETWORK_IDENTIFIER",
    "docstring": "Network serialization.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\network_serialization.py",
    "ast_data": "ClassDef name:NetworkSavedModelSaver FunctionDef name:object_identifier arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_metadata_to_str",
    "source_code": "def _metadata_to_str(key, value):\n    if isinstance(value, datetime.datetime):\n        value = _datetime_to_pdf(value)\n    elif key == 'Trapped':\n        value = value.name.decode('ascii')\n    else:\n        value = str(value)\n    return f'{key}={{{value}}}'",
    "docstring": "Convert metadata key/value to a form that hyperref accepts.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:_metadata_to_str arg:key arg:value arguments arg arg If Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write",
    "source_code": "def write(self, file_content):\n    self._prewrite_check()\n    self._writable_file.append(compat.as_bytes(file_content, encoding=self.__encoding))",
    "docstring": "Writes file_content to the file. Appends to the end of the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:write arg:self arg:file_content arguments arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "distributed_mode",
    "source_code": "@property\ndef distributed_mode(self):\n    return bool(self._cluster_spec) and self._task_type != _TaskType.EVALUATOR",
    "docstring": "Whether it is distributed training or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:distributed_mode arg:self arguments arg Return return:yes BoolOp Call Compare"
  },
  {
    "library": "kornia",
    "name": "Sharpness",
    "source_code": "class Sharpness(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.5, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.1, 1.9), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n        super().__init__(K.RandomSharpness(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('sharpness', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply sharpness operation. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Sharpness FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "numpy",
    "name": "check_gcc_version_at_least",
    "source_code": "def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):\n    cmd._check_compiler()\n    version = '.'.join([str(major), str(minor), str(patchlevel)])\n    body = textwrap.dedent('\\n        int\\n        main()\\n        {\\n        #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\\\\\n                (__GNUC_MINOR__ < %(minor)d) || \\\\\\n                (__GNUC_PATCHLEVEL__ < %(patchlevel)d)\\n        #error gcc >= %(version)s required\\n        #endif\\n            return 0;\\n        }\\n        ')\n    kw = {'version': version, 'major': major, 'minor': minor, 'patchlevel': patchlevel}\n    return cmd.try_compile(body % kw, None, None)",
    "docstring": "Check that the gcc version is at least the specified version.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\command\\autodist.py",
    "ast_data": "FunctionDef name:check_gcc_version_at_least arg:cmd arg:major arg:minor arg:patchlevel arguments arg arg arg arg Call Assign Call Call Call Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_estimators_fit_returns_self",
    "source_code": "@ignore_warnings(category=FutureWarning)\ndef check_estimators_fit_returns_self(name, estimator_orig):\n    X, y = make_blobs(random_state=0, n_samples=21)\n    X = _enforce_estimator_tags_X(estimator_orig, X)\n    estimator = clone(estimator_orig)\n    y = _enforce_estimator_tags_y(estimator, y)\n    set_random_state(estimator)\n    assert estimator.fit(X, y) is estimator",
    "docstring": "Check if self is returned when calling fit.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:check_estimators_fit_returns_self arg:name arg:estimator_orig arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "TrainingState",
    "source_code": "class TrainingState(Enum):\n    FORWARD = auto()\n    PRE_BACKWARD = auto()\n    POST_BACKWARD = auto()\n    IDLE = auto()",
    "docstring": "Describes the training state of one FSDP state / parameter group.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_common.py",
    "ast_data": "ClassDef name:TrainingState Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_graph_debug_info",
    "source_code": "def get_graph_debug_info(self, name):\n    with c_api_util.tf_buffer() as buffer_:\n        pywrap_tfe.TFE_ContextGetGraphDebugInfo(self._handle, name, buffer_)\n        proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n    graph_debug_info = graph_debug_info_pb2.GraphDebugInfo()\n    graph_debug_info.ParseFromString(proto_data)\n    return graph_debug_info",
    "docstring": "Get GraphDebugInfo associated with a function from the context. Args: name: function signature name. Returns: The requested GraphDebugInfo. Raises: tf.errors.NotFoundError: if name is not the name of a registered function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_graph_debug_info arg:self arg:name arguments arg arg With Call Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "deconvolve",
    "source_code": "def deconvolve(signal, divisor):\n    xp = array_namespace(signal, divisor)\n    num = xpx.atleast_nd(xp.asarray(signal), ndim=1, xp=xp)\n    den = xpx.atleast_nd(xp.asarray(divisor), ndim=1, xp=xp)\n    if num.ndim > 1:\n        raise ValueError('signal must be 1-D.')\n    if den.ndim > 1:\n        raise ValueError('divisor must be 1-D.')\n    N = num.shape[0]\n    D = den.shape[0]\n    if D > N:\n        quot = []\n        rem = num\n    else:\n        input = xp.zeros(N - D + 1, dtype=xp.float64)\n        input[0] = 1\n        quot = lfilter(num, den, input)\n        rem = num - convolve(den, quot, mode='full')\n    return (quot, rem)",
    "docstring": "Deconvolves `` Parameters ---------- signal : (N,) array_like Signal data, typically a recorded signal divisor : (N,) array_like Divisor data, typically an impulse response or filter that was applied to the original signal Returns ------- quotient : ndarray Quotient, typically the recovered original signal remainder : ndarray Remainder See Also -------- numpy.polydiv : performs polynomial division (same operation, but also accepts poly1d objects) Examples -------- Deconvolve a signal that's been filtered: >>> from scipy import signal >>> original = [0, 1, 0, 0, 1, 1, 0, 0] >>> impulse_response = [2, 1] >>> recorded = signal.convolve(impulse_response, original) >>> recorded array([0, 2, 1, 0, 2, 3, 1, 0, 0]) >>> recovered, remainder = signal.deconvolve(recorded, impulse_response) >>> recovered array([ 0., 1., 0., 0., 1., 1., 0., 0.])",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:deconvolve arg:signal arg:divisor arguments arg arg Assign Call Assign Call Call Assign Call Call If Compare Raise Call If Compare Raise Call Assign Assign If Compare Assign Assign Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "cheb2ap",
    "source_code": "def cheb2ap(N, rs):\n    if abs(int(N)) != N:\n        raise ValueError('Filter order must be a nonnegative integer')\n    elif N == 0:\n        return (np.array([]), np.array([]), 1)\n    de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)\n    mu = arcsinh(1.0 / de) / N\n    if N % 2:\n        m = np.concatenate((np.arange(-N + 1, 0, 2), np.arange(2, N, 2)))\n    else:\n        m = np.arange(-N + 1, N, 2)\n    z = -conjugate(1j / sin(m * pi / (2.0 * N)))\n    p = -exp(1j * pi * np.arange(-N + 1, N, 2) / (2 * N))\n    p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag\n    p = 1.0 / p\n    k = (np.prod(-p, axis=0) / np.prod(-z, axis=0)).real\n    return (z, p, k)",
    "docstring": "Return (z,p,k) for Nth-order Chebyshev type II analog lowpass filter. The returned filter prototype has attenuation of at least ``. See Also -------- cheby2 : Filter design function using this prototype",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:cheb2ap arg:N arg:rs arguments arg arg If Compare Call Call Raise Call If Compare Return return:yes Call Call Assign Call Assign Call If Assign Call Call Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "drange",
    "source_code": "def drange(dstart, dend, delta):\n    f1 = date2num(dstart)\n    f2 = date2num(dend)\n    step = delta.total_seconds() / SEC_PER_DAY\n    num = int(np.ceil((f2 - f1) / step))\n    dinterval_end = dstart + num * delta\n    if dinterval_end >= dend:\n        dinterval_end -= delta\n        num -= 1\n    f2 = date2num(dinterval_end)\n    return np.linspace(f1, f2, num + 1)",
    "docstring": "Return a sequence of equally spaced Matplotlib dates. The dates start at *dstart* and reach up to, but not including *dend*. They are spaced by *delta*. Parameters ---------- dstart, dend : The date limits. delta : Spacing of the dates. Returns ------- A list floats representing Matplotlib dates.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:drange arg:dstart arg:dend arg:delta arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "prepare",
    "source_code": "def prepare(self, model, config):\n    self.model = model\n    self.config = config\n    if self.config is None:\n        self.make_config_from_model(model)\n    for module_config in self.config:\n        assert isinstance(module_config, dict), 'config elements should be dicts not modules i.e.:[{`tensor_fqn`: `foo.bar.weight`}, {`tensor_fqn`: ... }, ...]'\n        assert isinstance(self.defaults, dict)\n        local_args = copy.deepcopy(self.defaults)\n        local_args.update(module_config)\n        tensor_fqn = local_args.get('tensor_fqn', None)\n        assert tensor_fqn is not None, 'tensor_fqn is a required argument in the sparsity config whichreplaces previous `module` and [module]`fqn` arguments'\n        info_from_tensor_fqn = get_arg_info_from_tensor_fqn(model, tensor_fqn)\n        for key in info_from_tensor_fqn.keys():\n            if key in local_args:\n                assert info_from_tensor_fqn[key] == local_args[key] or (key == 'tensor_fqn' and '.' + info_from_tensor_fqn[key] == local_args[key]), f'Given both `{key}` and `tensor_fqn` in the config, it is expected them to agree!'\n        local_args.update(info_from_tensor_fqn)\n        self.groups.append(local_args)\n    self._prepare()",
    "docstring": "Prepares a model, by adding the parametrizations. Note:: The model is modified inplace. If you need to preserve the original model, use copy.deepcopy.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\base_sparsifier.py",
    "ast_data": "FunctionDef name:prepare arg:self arg:model arg:config arguments arg arg arg Assign Assign If Compare Call For Call Call Assign Call Call Assign Call Compare Assign Call For Call If Compare BoolOp Compare BoolOp Compare Compare Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_rotation",
    "source_code": "def set_rotation(self, s):\n    if isinstance(s, Real):\n        self._rotation = float(s) % 360\n    elif cbook._str_equal(s, 'horizontal') or s is None:\n        self._rotation = 0.0\n    elif cbook._str_equal(s, 'vertical'):\n        self._rotation = 90.0\n    else:\n        raise ValueError(f\"rotation must be 'vertical', 'horizontal' or a number, not {s}\")\n    self.stale = True",
    "docstring": "Set the rotation of the text. Parameters ---------- s : float or {'vertical', 'horizontal'} The rotation angle in degrees in mathematically positive direction (counterclockwise). 'horizontal' equals 0, 'vertical' equals 90.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_rotation arg:self arg:s arguments arg arg If Call Assign Call If BoolOp Call Compare Assign If Call Assign Raise Call Assign"
  },
  {
    "library": "django",
    "name": "ask_not_null_alteration",
    "source_code": "def ask_not_null_alteration(self, field_name, model_name):\n    if not self.dry_run:\n        choice = self._choice_input(f\"It is impossible to change a nullable field '{field_name}' on {model_name} to non-nullable without providing a default. This is because the database needs something to populate existing rows.\\nPlease select a fix:\", ['Provide a one-off default now (will be set on all existing rows with a null value for this column)', 'Ignore for now. Existing rows that contain NULL values will have to be handled manually, for example with a RunPython or RunSQL operation.', 'Quit and manually define a default value in models.py.'])\n        if choice == 2:\n            return NOT_PROVIDED\n        elif choice == 3:\n            sys.exit(3)\n        else:\n            return self._ask_default()\n    return None",
    "docstring": "Changing a NULL field to NOT NULL.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_not_null_alteration arg:self arg:field_name arg:model_name arguments arg arg arg If Assign Call If Compare Return return:yes If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "new_manager",
    "source_code": "@classmethod\ndef new_manager(cls, figure, num):\n    return cls.manager_class.create_with_canvas(cls, figure, num)",
    "docstring": "Create a new figure manager for *figure*, using this canvas class. Notes ----- This method should not be reimplemented in subclasses. If custom manager creation logic is needed, please reimplement ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:new_manager arg:cls arg:figure arg:num arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_OptionsExported",
    "source_code": "@tf_export('distribute.experimental.CommunicationOptions')\nclass _OptionsExported(object):\n\n    def __new__(cls, *args, **kwargs):\n        return Options(*args, **kwargs)\n\n    def __init__(self, bytes_per_pack=0, timeout_seconds=None, implementation=CommunicationImplementation.AUTO):\n        pass",
    "docstring": "Options for cross device communications like All-reduce. This can be passed to methods like to optimize collective operation performance. Note that these are only hints, which may or may not change the actual behavior. Some options only apply to certain strategy and are ignored by others. One common optimization is to break gradients all-reduce into multiple packs so that weight updates can overlap with gradient all-reduce. Examples:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py",
    "ast_data": "ClassDef name:_OptionsExported FunctionDef name:__new__ arg:cls arguments arg arg arg Return return:yes Call FunctionDef name:__init__ arg:self arg:bytes_per_pack arg:timeout_seconds arg:implementation arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "subgroup_tile",
    "source_code": "@classmethod\ndef subgroup_tile(cls, tile_assignment, subgroup_modes):\n    if not isinstance(tile_assignment, _np.ndarray):\n        raise TypeError('SubgroupTile assignment must be of type np.ndarray')\n    if not isinstance(subgroup_modes, list):\n        raise TypeError('subgroup_modes in subgroup manual must be of type list')\n    if len(tile_assignment.shape) < len(subgroup_modes):\n        raise TypeError('SubgroupTile assignment must have rank larger than length of subgroup_modes')\n    for sharding_type in subgroup_modes:\n        if sharding_type not in [xla_data_pb2.OpSharding.REPLICATED, xla_data_pb2.OpSharding.MANUAL]:\n            raise TypeError('Each sharding_type in subgroup_modes in subgroup manual must be of type xla_data_pb2.OpSharding.REPLICATED or xla_data_pb2.OpSharding.MANUAL')\n    dims = list(tile_assignment.shape)\n    flattened_devices = tile_assignment.reshape(-1, order='C')\n    return Sharding(proto=xla_data_pb2.OpSharding(type=xla_data_pb2.OpSharding.OTHER, tile_assignment_dimensions=dims, tile_assignment_devices=list(flattened_devices), last_tile_dims=list(subgroup_modes)))",
    "docstring": "Returns a subgroup manual sharding attribute. This is similar to tile(), but tile_assignment has one or more dimension than the tensor, and subgroup_modes define the sharding types in the last dimensions of tile_assignment. Args: tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. subgroup_modes: sharding types for the dimension more than the tensor shape rank. Raises: TypeError: tile_assignment was not of np.array type or subgroup_modes has unsupported sharding type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:subgroup_tile arg:cls arg:tile_assignment arg:subgroup_modes arguments arg arg arg If Call Raise Call If Call Raise Call If Compare Call Call Raise Call For If Compare Raise Call Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "rot_y",
    "source_code": "@classmethod\ndef rot_y(cls, y: Tensor) -> Se3:\n    zs = zeros_like(y)\n    return cls(So3.rot_y(y), stack((zs, zs, zs), -1))",
    "docstring": "Construct a y-axis rotation. Args: y: the y-axis rotation angle.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:rot_y arg:cls arg:y arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "min_scalar_type",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)\ndef min_scalar_type(a):\n    return (a,)",
    "docstring": "min_scalar_type(a, /) For scalar ``, returns the vector's dtype unmodified. Floating point values are not demoted to integers, and complex values are not demoted to floats. Parameters ---------- a : scalar or array_like The value whose minimal data type is to be found. Returns ------- out : dtype The minimal data type. See Also -------- result_type, promote_types, dtype, can_cast Examples -------- >>> import numpy as np >>> np.min_scalar_type(10) dtype('uint8') >>> np.min_scalar_type(-260) dtype('int16') >>> np.min_scalar_type(3.1) dtype('float16') >>> np.min_scalar_type(1e50) dtype('float64') >>> np.min_scalar_type(np.arange(4,dtype='f8')) dtype('float64')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:min_scalar_type arg:a arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "image_to_string",
    "source_code": "def image_to_string(image: Tensor, max_width: int=256) -> str:\n    KORNIA_CHECK_IS_IMAGE(image, None, raises=True)\n    KORNIA_CHECK_SHAPE(image, ['C', 'H', 'W'])\n    if image.dtype not in [float16, float32, float64]:\n        image = image / 255.0\n    if image.shape[-1] > max_width:\n        image = kornia.geometry.resize(image, (image.size(-2) * max_width // image.size(-1), max_width))\n    image = (image * 255).long()\n    res = ''\n    for y in range(image.size(-2)):\n        for x in range(image.size(-1)):\n            r, g, b = image[:, y, x]\n            h = f'{r:2x}{g:2x}{b:2x}'\n            short, _ = rgb2short(h)\n            res += f'\\x1b[48;5;{short}m  '\n        res += '\\x1b[0m\\n'\n    return res",
    "docstring": "Obtain the closest xterm-256 approximation string from an image tensor. The tensor shall be either 0~1 float type or 0~255 long type. Args: image: an RGB image with shape :math:. max_width: maximum width of the input image.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image_print.py",
    "ast_data": "FunctionDef name:image_to_string arg:image arg:max_width arguments arg arg Call Call If Compare Assign If Compare Assign Call Call Call Assign Call Assign For Call Call For Call Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "m",
    "source_code": "@property\ndef m(self):\n    if self.is_measured:\n        return capi.getm(self.ptr, 0)",
    "docstring": "Return the M coordinate for this Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:m arg:self arguments arg If Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "range_pop",
    "source_code": "def range_pop():\n    return _nvtx.rangePop()",
    "docstring": "Pop a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\nvtx.py",
    "ast_data": "FunctionDef name:range_pop arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_set_model_to_eval",
    "source_code": "@contextlib.contextmanager\ndef _set_model_to_eval(model):\n    if not isinstance(model, torch.jit.ScriptFunction):\n        originally_training = model.training\n        model.train(False)\n        try:\n            yield\n        finally:\n            model.train(originally_training)\n    else:\n        try:\n            yield\n        finally:\n            pass",
    "docstring": "Context manager to temporarily set the training mode of `` to eval.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_pytorch_graph.py",
    "ast_data": "FunctionDef name:_set_model_to_eval arg:model arguments arg If Call Assign Call Try Call Try"
  },
  {
    "library": "tensorflow",
    "name": "_should_use_multi_device_iterator",
    "source_code": "def _should_use_multi_device_iterator(options):\n    if options is None or options.experimental_replication_mode == InputReplicationMode.PER_WORKER or (options.experimental_replication_mode == InputReplicationMode.PER_REPLICA and options.experimental_fetch_to_device):\n        return True\n    return False",
    "docstring": "Determine whether to use multi_device_iterator_ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_should_use_multi_device_iterator arg:options arguments arg If BoolOp Compare Compare BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "safe_inverse_with_mask",
    "source_code": "def safe_inverse_with_mask(A: Tensor) -> Tuple[Tensor, Tensor]:\n    if not isinstance(A, Tensor):\n        raise AssertionError(f'A must be Tensor. Got: {type(A)}.')\n    dtype_original = A.dtype\n    if dtype_original not in (torch.float32, torch.float64):\n        dtype = torch.float32\n    else:\n        dtype = dtype_original\n    inverse, info = inv_ex(A.to(dtype))\n    mask = info == 0\n    return (inverse.to(dtype_original), mask)",
    "docstring": "Perform inverse. Avoids crashing because of non-invertable matrix input and outputs the mask of valid solution.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\helpers.py",
    "ast_data": "FunctionDef name:safe_inverse_with_mask arg:A arguments arg If Call Raise Call Call Assign If Compare Assign Assign Assign Call Call Assign Compare Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Rgb255ToRgb",
    "source_code": "class Rgb255ToRgb(Module):\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb255_to_rgb(image)",
    "docstring": "Convert an image from RGB [0, 255] to RGB for visualization purposes. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = Rgb255ToRgb() >>> output = rgb(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:Rgb255ToRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_data_interval",
    "source_code": "def get_data_interval(self):\n    raise NotImplementedError('Derived must override')",
    "docstring": "Return the `` data limits of this axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_data_interval arg:self arguments arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "PerformanceWarning",
    "source_code": "class PerformanceWarning(Warning):\n    pass",
    "docstring": "Warning raised when there is a possible performance impact. See Also -------- DataFrame.set_index : Set the DataFrame index using existing columns. DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Examples -------- >>> df = pd.DataFrame( ... {\"jim\": [0, 0, 1, 1], \"joe\": [\"x\", \"x\", \"z\", \"y\"], \"jolie\": [1, 2, 3, 4]} ... ) >>> df = df.set_index([\"jim\", \"joe\"]) >>> df jolie jim joe 0 x 1 x 2 1 z 3 y 4 >>> df.loc[(1, \"z\")] # doctest: +SKIP # PerformanceWarning: indexing past lexsort depth may impact performance. df.loc[(1, 'z')] jolie jim joe 1 z 3",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:PerformanceWarning"
  },
  {
    "library": "tensorflow",
    "name": "_build_node_defs_list",
    "source_code": "def _build_node_defs_list(self):\n    self._node_defs = {node.name: node for node in self._graph_def.node}\n    if self._graph_def.library:\n        for func in self._graph_def.library.function:\n            self._node_defs.update({node.name: node for node in func.node_def if node.op in _CONTROL_FLOW_OPS})",
    "docstring": "Builds the list of NodeDefs in the GraphDef. This list consists of all NodeDefs in the main graph as well as all control flow NodeDefs in the functions. The remaining NodeDefs in the functions are not included because the op names are not unique and the variables are handled differently than the main graph. The control flow ops need to be extracted because they are need their attributes to be updated similar to the control flow ops in the main graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_build_node_defs_list arg:self arguments arg Assign If For Call Compare"
  },
  {
    "library": "sphinx",
    "name": "SeeAlso",
    "source_code": "class SeeAlso(SphinxAdmonition):\n    node_class = addnodes.seealso",
    "docstring": "An admonition mentioning things to look at as reference.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\admonitions.py",
    "ast_data": "ClassDef name:SeeAlso Assign"
  },
  {
    "library": "scikit-learn",
    "name": "decision_path",
    "source_code": "def decision_path(self, X):\n    X = self._validate_X_predict(X)\n    indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='threads')((delayed(tree.decision_path)(X, check_input=False) for tree in self.estimators_))\n    n_nodes = [0]\n    n_nodes.extend([i.shape[1] for i in indicators])\n    n_nodes_ptr = np.array(n_nodes).cumsum()\n    return (sparse_hstack(indicators).tocsr(), n_nodes_ptr)",
    "docstring": "Return the decision path in the forest. .. versionadded:: 0.18 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``. Returns ------- indicator : sparse matrix of shape (n_samples, n_nodes) Return a node indicator matrix where non zero elements indicates that the samples goes through the nodes. The matrix is of CSR format. n_nodes_ptr : ndarray of shape (n_estimators + 1,) The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]] gives the indicator value for the i-th estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_forest.py",
    "ast_data": "FunctionDef name:decision_path arg:self arg:X arguments arg arg Assign Call Assign Call Call Call Call Assign Call Assign Call Call Return return:yes Call Call"
  },
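A small usage example with the public RandomForestClassifier API; `indicator` has one row per sample and `n_nodes_ptr` has `n_estimators + 1` entries:
>>> from sklearn.datasets import make_classification
>>> from sklearn.ensemble import RandomForestClassifier
>>> X, y = make_classification(n_samples=20, random_state=0)
>>> forest = RandomForestClassifier(n_estimators=3, random_state=0).fit(X, y)
>>> indicator, n_nodes_ptr = forest.decision_path(X)
>>> indicator.shape[0], len(n_nodes_ptr)
(20, 4)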
  {
    "library": "django",
    "name": "is_multipart",
    "source_code": "def is_multipart(self):\n    if self.forms:\n        return self.forms[0].is_multipart()\n    else:\n        return self.empty_form.is_multipart()",
    "docstring": "Return True if the formset needs to be multipart, i.e. it has FileInput, or False otherwise.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:is_multipart arg:self arguments arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "register_partial_reduction_pattern",
    "source_code": "def register_partial_reduction_pattern():\n    equiv_red = {aten.amax.default: aten.max.default, aten.amin.default: aten.min.default}\n    for red_op in (aten.amax.default, aten.amin.default):\n        inp = KeywordArg('input')\n        partial_reduc = CallFunction(red_op, inp, KeywordArg('reduced_dims'), KeywordArg('keepdim'))\n        full_reduc = CallFunction([red_op, equiv_red[red_op]], inp)\n\n        @register_graph_pattern(MultiOutputPattern([partial_reduc, full_reduc]), pass_dict=pass_patterns[2])\n        def reuse_partial(match, input, reduced_dims, keepdim):\n            partial_red, full_red = match.output_nodes()\n            if not statically_known_true(input.meta['val'].numel() >= 4096):\n                return True\n\n            def replacement(inp: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:\n                partial = partial_red.target(inp, reduced_dims, keepdim)\n                complete = full_red.target(partial)\n                return (partial, complete)\n            counters['inductor']['partial_reduction_reuse'] += 1\n            match.replace_by_example(replacement, [input])",
    "docstring": "Reuse partial reductions in complete reductions",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:register_partial_reduction_pattern arguments Assign For Assign Call Assign Call Call Call Assign Call FunctionDef name:reuse_partial arg:match arg:input arg:reduced_dims arg:keepdim arguments arg arg arg arg Assign Call If Call Compare Call Return return:yes FunctionDef name:replacement arg:inp arguments arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_opset_imports",
    "source_code": "def add_opset_imports(model: ir.Model) -> None:\n    for node in ir.traversal.RecursiveGraphIterator(model.graph):\n        domain = node.domain\n        _maybe_set_opset_version(model.opset_imports, domain, node.version)\n    for function in model.functions.values():\n        for node in ir.traversal.RecursiveGraphIterator(function):\n            domain = node.domain\n            _maybe_set_opset_version(function.opset_imports, domain, node.version)\n        for domain, version in function.opset_imports.items():\n            _maybe_set_opset_version(model.opset_imports, domain, version)",
    "docstring": "Collect all opsets used and add opset imports to the model and functions.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_ir_passes.py",
    "ast_data": "FunctionDef name:add_opset_imports arg:model arguments arg For Call Assign Call For Call For Call Assign Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "has_free_unbacked_symbols",
    "source_code": "def has_free_unbacked_symbols(x: IterateExprs) -> bool:\n    from sympy.core.traversal import iterargs\n    for s in _iterate_exprs(x):\n        for arg in iterargs(s):\n            if arg.is_Symbol and symbol_is_type(arg, (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT)):\n                return True\n    return False",
    "docstring": "Faster version of bool(free_unbacked_symbols(val))",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:has_free_unbacked_symbols arg:x arguments arg For Call For Call If BoolOp Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DerivedQuantizationSpec",
    "source_code": "@dataclass(eq=True, frozen=True)\nclass DerivedQuantizationSpec(QuantizationSpecBase):\n    derived_from: list[EdgeOrNode]\n    derive_qparams_fn: Callable[[list[ObserverOrFakeQuantize]], tuple[Tensor, Tensor]]\n    dtype: torch.dtype\n    quant_min: Optional[int] = None\n    quant_max: Optional[int] = None\n    qscheme: Optional[torch.qscheme] = None\n    ch_axis: Optional[int] = None\n    is_dynamic: bool = False",
    "docstring": "Quantization spec for the Tensors whose quantization parameters are derived from other Tensors",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\quantizer.py",
    "ast_data": "ClassDef name:DerivedQuantizationSpec Call"
  },
  {
    "library": "pandas",
    "name": "_is_memory_usage_qualified",
    "source_code": "@cache_readonly\ndef _is_memory_usage_qualified(self) -> bool:\n\n    def f(dtype) -> bool:\n        return is_object_dtype(dtype) or (is_string_dtype(dtype) and dtype.storage == 'python')\n    return any((f(level.dtype) for level in self.levels))",
    "docstring": "return a boolean if we need a qualified .info display",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_is_memory_usage_qualified arg:self arguments arg FunctionDef name:f arg:dtype arguments arg Return return:yes BoolOp Call BoolOp Call Compare Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    params = dict()\n    cls = self.__class__\n    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)\n    init_sign = signature(init)\n    args, varargs = ([], [])\n    for parameter in init_sign.parameters.values():\n        if parameter.kind != parameter.VAR_KEYWORD and parameter.name != 'self':\n            args.append(parameter.name)\n        if parameter.kind == parameter.VAR_POSITIONAL:\n            varargs.append(parameter.name)\n    if len(varargs) != 0:\n        raise RuntimeError(\"scikit-learn kernels should always specify their parameters in the signature of their __init__ (no varargs). %s doesn't follow this convention.\" % (cls,))\n    for arg in args:\n        params[arg] = getattr(self, arg)\n    return params",
    "docstring": "Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Assign Call Assign Assign Call Assign Call Assign For Call If BoolOp Compare Compare Call If Compare Call If Compare Call Raise Call For Assign Call Return return:yes"
  },
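A short usage example with a concrete kernel; the exact bounds shown in the output may vary across scikit-learn versions:
>>> from sklearn.gaussian_process.kernels import RBF
>>> RBF(length_scale=1.5).get_params()
{'length_scale': 1.5, 'length_scale_bounds': (1e-05, 100000.0)}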
  {
    "library": "pytorch",
    "name": "fuse_seed_creation_pass",
    "source_code": "def fuse_seed_creation_pass(graph: torch.fx.Graph):\n    device_seeds = collections.defaultdict(list)\n    for node in graph.nodes:\n        if CallFunctionVarArgs(inductor_prims.seed).match(node):\n            device_seeds[node.args[0]].append(node)\n    if not device_seeds:\n        return 0\n    for device, seeds in device_seeds.items():\n        with graph.inserting_before(seeds[0]):\n            combined = graph.call_function(inductor_prims.seeds, (len(seeds), device))\n            with V.fake_mode:\n                combined.meta['val'] = torch.empty([len(seeds)], device=device, dtype=torch.int64)\n                combined.meta['tensor_meta'] = _extract_tensor_metadata(combined.meta['val'])\n        for idx, seed in enumerate(seeds):\n            with graph.inserting_before(seed):\n                new_seed = graph.call_function(inductor_prims.lookup_seed, (combined, idx))\n            seed.replace_all_uses_with(new_seed)\n            new_seed.meta.update(seed.meta)\n            graph.erase_node(seed)\n    return len(device_seeds)",
    "docstring": "Horizontally fuse all the seed generation on each device a = inductor_seed(dev) b = inductor_seed(dev) Becomes: seeds = inductor_seeds(2, dev) a = inductor_lookup_seed(seeds, 0) b = inductor_lookup_seed(seeds, 1) We do this because seed creation is entirely launch overhead bound.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\replace_random.py",
    "ast_data": "FunctionDef name:fuse_seed_creation_pass arg:graph arguments arg Assign Call For If Call Call Call If Return return:yes For Call With Call Assign Call Call With Assign Call Call Assign Call For Call With Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_AtomicCounter",
    "source_code": "class _AtomicCounter(object):\n    __slots__ = ['_value', '_lock']\n\n    def __init__(self):\n        self._value = 0\n        self._lock = threading.Lock()\n\n    def increment_and_get(self):\n        with self._lock:\n            self._value += 1\n            return self._value",
    "docstring": "A simple atomic counter.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "ClassDef name:_AtomicCounter Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Call FunctionDef name:increment_and_get arg:self arguments arg With Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "functions_to_serialize",
    "source_code": "@abc.abstractmethod\ndef functions_to_serialize(self, serialization_cache):\n    raise NotImplementedError",
    "docstring": "Returns extra functions to include when serializing a Keras object. Normally, when calling exporting an object to SavedModel, only the functions and objects defined by the user are saved. For example: Assigning trackable objects to attributes creates a graph, which is used for both checkpointing and SavedModel serialization. When the graph generated from attribute tracking is insufficient, extra objects and functions may be added at serialization time. For example, most models do not have their call function wrapped with a @tf.function decorator. This results in not being saved. Since Keras objects should be revivable from the SavedModel format, the call function is added as an extra function to serialize. This function and is called multiple times when exporting to SavedModel. Please use the cache to avoid generating new functions and objects. A fresh cache is created for each SavedModel export. Args: serialization_cache: Dictionary passed to all objects in the same object graph during serialization. Returns: A dictionary mapping attribute names to or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py",
    "ast_data": "FunctionDef name:functions_to_serialize arg:self arg:serialization_cache arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "fused_module_supports_equalization",
    "source_code": "def fused_module_supports_equalization(module) -> bool:\n    return type(module) in [nni.LinearReLU, nni.ConvReLU1d, nni.ConvReLU2d, nni.ConvReLU3d]",
    "docstring": "Checks if the fused node supports equalization.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:fused_module_supports_equalization arg:module arguments arg Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "set_dash_joinstyle",
    "source_code": "@_docstring.interpd\ndef set_dash_joinstyle(self, s):\n    js = JoinStyle(s)\n    if self._dashjoinstyle != js:\n        self.stale = True\n    self._dashjoinstyle = js",
    "docstring": "How to join segments of the line if it . The default joinstyle is :rc:. Parameters ---------- s : or %(JoinStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_dash_joinstyle arg:self arg:s arguments arg arg Assign Call If Compare Assign Assign"
  },
  {
    "library": "pandas",
    "name": "_SeriesInfoPrinter",
    "source_code": "class _SeriesInfoPrinter(_InfoPrinterAbstract):\n\n    def __init__(self, info: SeriesInfo, verbose: bool | None=None, show_counts: bool | None=None) -> None:\n        self.info = info\n        self.data = info.data\n        self.verbose = verbose\n        self.show_counts = self._initialize_show_counts(show_counts)\n\n    def _create_table_builder(self) -> _SeriesTableBuilder:\n        if self.verbose or self.verbose is None:\n            return _SeriesTableBuilderVerbose(info=self.info, with_counts=self.show_counts)\n        else:\n            return _SeriesTableBuilderNonVerbose(info=self.info)\n\n    def _initialize_show_counts(self, show_counts: bool | None) -> bool:\n        if show_counts is None:\n            return True\n        else:\n            return show_counts",
    "docstring": "Class for printing series info. Parameters ---------- info : SeriesInfo Instance of SeriesInfo. verbose : bool, optional Whether to print the full summary. show_counts : bool, optional Whether to show the non-null counts.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "ClassDef name:_SeriesInfoPrinter FunctionDef name:__init__ arg:self arg:info arg:verbose arg:show_counts arguments arg arg arg arg Assign Assign Assign Assign Call FunctionDef name:_create_table_builder arg:self arguments arg If BoolOp Compare Return return:yes Call Return return:yes Call FunctionDef name:_initialize_show_counts arg:self arg:show_counts arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "chebpts2",
    "source_code": "def chebpts2(npts):\n    _npts = int(npts)\n    if _npts != npts:\n        raise ValueError('npts must be integer')\n    if _npts < 2:\n        raise ValueError('npts must be >= 2')\n    x = np.linspace(-np.pi, 0, _npts)\n    return np.cos(x)",
    "docstring": "Chebyshev points of the second kind. The Chebyshev points of the second kind are the points `` sorted in ascending order. Parameters ---------- npts : int Number of sample points desired. Returns ------- pts : ndarray The Chebyshev points of the second kind.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebpts2 arg:npts arguments arg Assign Call If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call"
  },
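Worked example: for npts=5 the points are cos(linspace(-pi, 0, 5)), i.e. -1, -1/sqrt(2), 0, 1/sqrt(2), 1:
>>> from numpy.polynomial.chebyshev import chebpts2
>>> chebpts2(5)
array([-1.        , -0.70710678,  0.        ,  0.70710678,  1.        ])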
  {
    "library": "pandas",
    "name": "to_julian_date",
    "source_code": "def to_julian_date(self) -> npt.NDArray[np.float64]:\n    year = np.asarray(self.year)\n    month = np.asarray(self.month)\n    day = np.asarray(self.day)\n    testarr = month < 3\n    year[testarr] -= 1\n    month[testarr] += 12\n    return day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1721118.5 + (self.hour + self.minute / 60 + self.second / 3600 + self.microsecond / 3600 / 10 ** 6 + self.nanosecond / 3600 / 10 ** 9) / 24",
    "docstring": "Convert Datetime Array to float64 ndarray of Julian Dates. 0 Julian date is noon January 1, 4713 BC.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:to_julian_date arg:self arguments arg Assign Call Assign Call Assign Call Assign Compare Return return:yes Call Call Call Call"
  },
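A quick sanity check through the public DatetimeIndex API: noon on 2000-01-01 (the J2000 epoch) is Julian date 2451545.0:
>>> import pandas as pd
>>> idx = pd.DatetimeIndex(["2000-01-01 12:00"])
>>> float(idx.to_julian_date()[0])
2451545.0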
  {
    "library": "authlib",
    "name": "get_user",
    "source_code": "def get_user(self):\n    raise NotImplementedError()",
    "docstring": "A method to get the user object associated with this token: .. code-block:: def get_user(self): return User.get(self.user_id)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_user arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "null_experiment",
    "source_code": "def null_experiment(args, model_iter_fn, model, example_inputs):\n    return []",
    "docstring": "A no-op experiment useful for making sure TorchBenchark alone works properly.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:null_experiment arg:args arg:model_iter_fn arg:model arg:example_inputs arguments arg arg arg arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "IntegratorConcurrencyError",
    "source_code": "class IntegratorConcurrencyError(RuntimeError):\n\n    def __init__(self, name):\n        msg = f'Integrator `{name}` can be used to solve only a single problem at a time. If you want to integrate multiple problems, consider using a different integrator (see `ode.set_integrator`)'\n        RuntimeError.__init__(self, msg)",
    "docstring": "Failure due to concurrent usage of an integrator that can be used only for a single problem at a time.",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "ClassDef name:IntegratorConcurrencyError FunctionDef name:__init__ arg:self arg:name arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "add_graph",
    "source_code": "def add_graph(self, graph, global_step=None, graph_def=None):\n    if graph is not None and graph_def is not None:\n        raise ValueError('Please pass only graph, or graph_def (deprecated), but not both.')\n    if isinstance(graph, ops.Graph) or isinstance(graph_def, ops.Graph):\n        if not isinstance(graph, ops.Graph):\n            logging.warning('When passing a `Graph` object, please use the `graph` named argument instead of `graph_def`.')\n            graph = graph_def\n        true_graph_def = graph.as_graph_def(add_shapes=True)\n        self._write_plugin_assets(graph)\n    elif isinstance(graph, graph_pb2.GraphDef) or isinstance(graph_def, graph_pb2.GraphDef):\n        logging.warning('Passing a `GraphDef` to the SummaryWriter is deprecated. Pass a `Graph` object instead, such as `sess.graph`.')\n        if isinstance(graph, graph_pb2.GraphDef):\n            true_graph_def = graph\n        else:\n            true_graph_def = graph_def\n    else:\n        raise TypeError('The passed graph must be an instance of `Graph` or the deprecated `GraphDef`')\n    self._add_graph_def(true_graph_def, global_step)",
    "docstring": "Adds a to the event file. The graph described by the protocol buffer will be displayed by TensorBoard. Most users pass a graph in the constructor instead. Args: graph: A object, such as . global_step: Number. Optional global step counter to record with the graph. graph_def: DEPRECATED. Use the parameter instead. Raises: ValueError: If both graph and graph_def are passed to the method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:add_graph arg:self arg:graph arg:global_step arg:graph_def arguments arg arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Call Call If Call Call Assign Assign Call Call If BoolOp Call Call Call If Call Assign Assign Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_resource_apply_sparse",
    "source_code": "def _resource_apply_sparse(self, grad, handle, indices):\n    raise RuntimeError('This function should never be called')",
    "docstring": "This function should never be called.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_resource_apply_sparse arg:self arg:grad arg:handle arg:indices arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "GroupByPlot",
    "source_code": "@final\nclass GroupByPlot(PandasObject):\n\n    def __init__(self, groupby: GroupBy) -> None:\n        self._groupby = groupby\n\n    def __call__(self, *args, **kwargs):\n\n        def f(self):\n            return self.plot(*args, **kwargs)\n        f.__name__ = 'plot'\n        return self._groupby._python_apply_general(f, self._groupby._selected_obj)\n\n    def __getattr__(self, name: str):\n\n        def attr(*args, **kwargs):\n\n            def f(self):\n                return getattr(self.plot, name)(*args, **kwargs)\n            return self._groupby._python_apply_general(f, self._groupby._selected_obj)\n        return attr",
    "docstring": "Class implementing the .plot attribute for groupby objects.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "ClassDef name:GroupByPlot FunctionDef name:__init__ arg:self arg:groupby arguments arg arg Assign FunctionDef name:__call__ arg:self arguments arg arg arg FunctionDef name:f arg:self arguments arg Return return:yes Call Assign Return return:yes Call FunctionDef name:__getattr__ arg:self arg:name arguments arg arg FunctionDef name:attr arguments arg arg FunctionDef name:f arg:self arguments arg Return return:yes Call Call Return return:yes Call Return return:yes"
  },
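A minimal sketch of the dispatching behavior (requires matplotlib; the result is indexed by group key, one plot call per group):
>>> import pandas as pd
>>> df = pd.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]})
>>> axes = df.groupby("g").plot(legend=False)  # one Axes per group
>>> list(axes.index)
['a', 'b']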
  {
    "library": "tensorflow",
    "name": "isfunction",
    "source_code": "def isfunction(object):\n    return _inspect.isfunction(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isfunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:isfunction arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "standardize_single_array",
    "source_code": "def standardize_single_array(x, expected_shape=None):\n    if x is None:\n        return None\n    if is_composite_or_composite_value(x):\n        return x\n    if isinstance(x, int):\n        raise ValueError('Expected an array data type but received an integer: {}'.format(x))\n    if x.shape is not None and len(x.shape) == 1 and (expected_shape is None or len(expected_shape) != 1):\n        if tensor_util.is_tf_type(x):\n            x = array_ops.expand_dims(x, axis=1)\n        else:\n            x = np.expand_dims(x, 1)\n    return x",
    "docstring": "Expand data of shape (x,) to (x, 1), unless len(expected_shape)==1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:standardize_single_array arg:x arg:expected_shape arguments arg arg If Compare Return return:no If Call Return return:yes If Call Raise Call Call If BoolOp Compare Compare Call BoolOp Compare Compare Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "and_",
    "source_code": "def and_(a, b):\n    a_val = a()\n    if tensor_util.is_tf_type(a_val):\n        return _tf_lazy_and(a_val, b)\n    return _py_lazy_and(a_val, b)",
    "docstring": "Functional form of \"and\". Uses lazy evaluation semantics.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:and_ arg:a arg:b arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "date_extract_sql",
    "source_code": "def date_extract_sql(self, lookup_type, sql, params):\n    return (f'django_date_extract(%s, {sql})', (lookup_type.lower(), *params))",
    "docstring": "Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\sqlite3\\operations.py",
    "ast_data": "FunctionDef name:date_extract_sql arg:self arg:lookup_type arg:sql arg:params arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "merged",
    "source_code": "@property\ndef merged(self):\n    return self._topology(capi.geos_linemerge(self.ptr))",
    "docstring": "Return the line merge of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:merged arg:self arguments arg Return return:yes Call Call"
  },
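A small example (requires the GEOS library used by GeoDjango); two contiguous segments merge into one line:
>>> from django.contrib.gis.geos import LineString, MultiLineString
>>> mls = MultiLineString(LineString((0, 0), (1, 1)), LineString((1, 1), (2, 2)))
>>> mls.merged.coords
((0.0, 0.0), (1.0, 1.0), (2.0, 2.0))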
  {
    "library": "pytorch",
    "name": "StreamReaderIterDataPipe",
    "source_code": "@functional_datapipe('read_from_stream')\nclass StreamReaderIterDataPipe(IterDataPipe[tuple[str, bytes]]):\n\n    def __init__(self, datapipe: IterDataPipe[tuple[str, IOBase]], chunk: Optional[int]=None):\n        self.datapipe = datapipe\n        self.chunk = chunk\n\n    def __iter__(self) -> Iterator[tuple[str, bytes]]:\n        for furl, stream in self.datapipe:\n            while True:\n                d = stream.read(self.chunk)\n                if not d:\n                    stream.close()\n                    break\n                yield (furl, d)",
    "docstring": "Given IO streams and their label names, yield bytes with label name as tuple. (functional name: ``, all bytes will be read until the EOF. Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader >>> from io import StringIO >>> dp = IterableWrapper([(\"alphabet\", StringIO(\"abcde\"))]) >>> list(StreamReader(dp, chunk=1)) [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\streamreader.py",
    "ast_data": "ClassDef name:StreamReaderIterDataPipe FunctionDef name:__init__ arg:self arg:datapipe arg:chunk arguments arg arg arg Assign Assign FunctionDef name:__iter__ arg:self arguments arg For While Assign Call If Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_xy1",
    "source_code": "def set_xy1(self, *args, **kwargs):\n    params = _api.select_matching_signature([lambda self, x, y: locals(), lambda self, xy1: locals()], self, *args, **kwargs)\n    if 'x' in params:\n        _api.warn_deprecated('3.10', message='Passing x and y separately to AxLine.set_xy1 is deprecated since %(since)s; pass them as a single tuple instead.')\n        xy1 = (params['x'], params['y'])\n    else:\n        xy1 = params['xy1']\n    self._xy1 = xy1",
    "docstring": "Set the *xy1* value of the line. Parameters ---------- xy1 : tuple[float, float] Points for the line to pass through.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_xy1 arg:self arguments arg arg arg Assign Call arguments arg arg arg Call arguments arg arg Call If Compare Call Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, data: Tensor, pixel_format: PixelFormat, layout: ImageLayout) -> None:\n    if layout.channels_order == ChannelsOrder.CHANNELS_FIRST:\n        shape = [str(layout.channels), str(layout.image_size.height), str(layout.image_size.width)]\n    elif layout.channels_order == ChannelsOrder.CHANNELS_LAST:\n        shape = [str(layout.image_size.height), str(layout.image_size.width), str(layout.channels)]\n    else:\n        raise NotImplementedError(f'Layout {layout.channels_order} not implemented.')\n    KORNIA_CHECK_SHAPE(data, shape)\n    KORNIA_CHECK(data.element_size() == pixel_format.bit_depth // 8, 'Invalid bit depth.')\n    self._data = data\n    self._pixel_format = pixel_format\n    self._layout = layout",
    "docstring": "Image constructor. Args: data: a torch tensor containing the image data. pixel_format: the pixel format of the image. layout: a dataclass containing the image layout information.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:data arg:pixel_format arg:layout arguments arg arg arg arg If Compare Assign Call Call Call If Compare Assign Call Call Call Raise Call Call Call Compare Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_matrix",
    "source_code": "def set_matrix(self, mtx):\n    self._mtx = mtx\n    self.invalidate()",
    "docstring": "Set the underlying transformation matrix from a 3x3 array:: a c e b d f 0 0 1 .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:set_matrix arg:self arg:mtx arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_path_to_root",
    "source_code": "@property\ndef _path_to_root(self) -> Generator[CUDAGraphNode, None, None]:\n    node = self\n    while node:\n        yield node\n        node = node.parent",
    "docstring": "Returns all nodes in the path starting at self and ending at root",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_path_to_root arg:self arguments arg Assign While Assign"
  },
  {
    "library": "pytorch",
    "name": "_is_optional",
    "source_code": "def _is_optional(type_: type) -> bool:\n    origin_type = typing.get_origin(type_)\n    if origin_type is Union and type(None) in typing.get_args(type_):\n        return True\n    if origin_type is Optional:\n        return True\n    if hasattr(types, 'UnionType') and origin_type is types.UnionType and (type(None) in typing.get_args(type_)):\n        return True\n    return False",
    "docstring": "Returns whether a type_ is an Optional.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py",
    "ast_data": "FunctionDef name:_is_optional arg:type_ arguments arg Assign Call If BoolOp Compare Compare Call Call Return return:yes If Compare Return return:yes If BoolOp Call Compare Compare Call Call Return return:yes Return return:yes"
  },
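The helper is private, but the typing introspection it relies on is easy to verify directly (the last check needs Python 3.10+ for PEP 604 unions):
>>> import types, typing
>>> typing.get_origin(typing.Optional[int]) is typing.Union
True
>>> type(None) in typing.get_args(typing.Optional[int])
True
>>> typing.get_origin(int | None) is types.UnionType
True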
  {
    "library": "pandas",
    "name": "supported_extensions",
    "source_code": "@property\ndef supported_extensions(self) -> tuple[str, ...]:\n    return self._supported_extensions",
    "docstring": "Extensions that writer engine supports.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:supported_extensions arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_nanmedian",
    "source_code": "def _nanmedian(a, axis=None, out=None, overwrite_input=False):\n    if axis is None or a.ndim == 1:\n        part = a.ravel()\n        if out is None:\n            return _nanmedian1d(part, overwrite_input)\n        else:\n            out[...] = _nanmedian1d(part, overwrite_input)\n            return out\n    else:\n        if a.shape[axis] < 600:\n            return _nanmedian_small(a, axis, out, overwrite_input)\n        result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)\n        if out is not None:\n            out[...] = result\n        return result",
    "docstring": "Private function that doesn't support extended axis or keepdims. These methods are extended to this function using _ureduce See nanmedian for parameter usage",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nanmedian arg:a arg:axis arg:out arg:overwrite_input arguments arg arg arg arg If BoolOp Compare Compare Assign Call If Compare Return return:yes Call Assign Call Return return:yes If Compare Return return:yes Call Assign Call If Compare Assign Return return:yes"
  },
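This private helper backs the public np.nanmedian; a quick example of NaN-ignoring behavior along an axis:
>>> import numpy as np
>>> np.nanmedian(np.array([[1.0, np.nan], [3.0, 4.0]]), axis=1)
array([1. , 3.5])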
  {
    "library": "scikit-learn",
    "name": "_generate_indices",
    "source_code": "def _generate_indices(random_state, bootstrap, n_population, n_samples):\n    if bootstrap:\n        indices = random_state.randint(0, n_population, n_samples)\n    else:\n        indices = sample_without_replacement(n_population, n_samples, random_state=random_state)\n    return indices",
    "docstring": "Draw randomly sampled indices.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_generate_indices arg:random_state arg:bootstrap arg:n_population arg:n_samples arguments arg arg arg arg If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    if groups is not None:\n        warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n    return super().split(X, y, groups=groups)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "aoti_load_package",
    "source_code": "def aoti_load_package(path: FileLike, run_single_threaded: bool=False, device_index: int=-1) -> Any:\n    from torch._inductor.package import load_package\n    return load_package(path, run_single_threaded=run_single_threaded, device_index=device_index)",
    "docstring": "Loads the model from the PT2 package. If multiple models were packaged into the PT2, this will load the default model. To load a specific model, you can directly call the load API .. code-block:: python from torch._inductor.package import load_package compiled_model1 = load_package(\"my_package.pt2\", \"model1\") compiled_model2 = load_package(\"my_package.pt2\", \"model2\") Args: path: Path to the .pt2 package run_single_threaded (bool): Whether the model should be run without thread synchronization logic. This is useful to avoid conflicts with CUDAGraphs. device_index (int): The index of the device to which the PT2 package is to be loaded. By default, is used, which corresponds to the device when using CUDA. Passing would load the package to , for example.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\__init__.py",
    "ast_data": "FunctionDef name:aoti_load_package arg:path arg:run_single_threaded arg:device_index arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "define_kernel",
    "source_code": "def define_kernel(self, kernels):\n    kernel_names = tuple((k.kernel_name for k in kernels))\n    if kernel_names in self.subkernel_to_kernel_name:\n        return self.subkernel_to_kernel_name[kernel_names]\n    multi_kernel_name = f'multi_kernel_{len(self.subkernel_to_kernel_name)}'\n    self.subkernel_to_kernel_name[kernel_names] = multi_kernel_name\n    if V.graph.cpp_wrapper and (not config.triton.autotune_at_compile_time):\n        return multi_kernel_name\n    buf = self.kernel_defs\n    buf.writeline('')\n    buf.writeline(f'{multi_kernel_name} = async_compile.multi_kernel({multi_kernel_name!r}, [')\n    with buf.indent():\n        for name in kernel_names:\n            buf.writeline(f'{name},')\n    buf.writeline('])')\n    if config.triton.autotune_at_compile_time:\n        V.graph.wrapper_code.src_to_kernel['\\n'.join(kernel_names)] = multi_kernel_name\n    return multi_kernel_name",
    "docstring": "Previously we name the multi kernel as \"multi_kernel_{kernel_names[0]}\". This has some minor issue. E.g. for persistent reduction , there are 2 flavors of non-persistent reduction: and The only different is cache eviction policy. We should name the multi-kernel differently in these 2 cases.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:define_kernel arg:self arg:kernels arguments arg arg Assign Call If Compare Return return:yes Assign Call Assign If BoolOp Return return:yes Assign Call Call With Call For Call Call If Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "len",
    "source_code": "def len(self):\n    result = self._data.array._str_len()\n    return self._wrap_result(result, returns_string=False)",
    "docstring": "Compute the length of each element in the Series/Index. The element may be a sequence (such as a string, tuple or list) or a collection (such as a dictionary). Returns ------- Series or Index of int A Series or Index of integer values indicating the length of each element in the Series or Index. See Also -------- str.len : Python built-in function returning the length of an object. Series.size : Returns the length of the Series. Examples -------- Returns the length (number of characters) in a string. Returns the number of entries for dictionaries, lists or tuples. >>> s = pd.Series( ... [\"dog\", \"\", 5, {\"foo\": \"bar\"}, [2, 3, 5, 7], (\"one\", \"two\", \"three\")] ... ) >>> s 0 dog 1 2 5 3 {'foo': 'bar'} 4 [2, 3, 5, 7] 5 (one, two, three) dtype: object >>> s.str.len() 0 3.0 1 0.0 2 NaN 3 1.0 4 4.0 5 3.0 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:len arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Denormalize",
    "source_code": "class Denormalize(IntensityAugmentationBase2D):\n\n    def __init__(self, mean: Union[Tensor, Tuple[float], List[float], float], std: Union[Tensor, Tuple[float], List[float], float], p: float=1.0, keepdim: bool=False) -> None:\n        super().__init__(p=p, same_on_batch=True, keepdim=keepdim)\n        if isinstance(mean, float):\n            mean = torch.tensor([mean])\n        if isinstance(std, float):\n            std = torch.tensor([std])\n        if isinstance(mean, (tuple, list)):\n            mean = torch.tensor(mean)\n        if isinstance(std, (tuple, list)):\n            std = torch.tensor(std)\n        self.flags = {'mean': mean, 'std': std}\n\n    def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n        return denormalize(input, flags['mean'], flags['std'])",
    "docstring": "Denormalize tensor images with mean and standard deviation. .. math:: \\text{input[channel] = (input[channel] * std[channel]) + mean[channel]} Where is :math: and :math: for channels, Args: mean: Mean for each channel. std: Standard deviations for each channel. same_on_batch: apply the same transformation across the batch. p: probability of applying the transformation. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Return: Denormalised tensor with same size as input :math:. .. note:: This function internally uses :func:. Examples: >>> norm = Denormalize(mean=torch.zeros(1, 4), std=torch.ones(1, 4)) >>> x = torch.rand(1, 4, 3, 3) >>> out = norm(x) >>> out.shape torch.Size([1, 4, 3, 3])",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\denormalize.py",
    "ast_data": "ClassDef name:Denormalize FunctionDef name:__init__ arg:self arg:mean arg:std arg:p arg:keepdim arguments arg arg arg arg arg Call Call If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign Call Assign FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "describe_categorical",
    "source_code": "@property\n@abstractmethod\ndef describe_categorical(self) -> CategoricalDescription:\n    pass",
    "docstring": "If the dtype is categorical, there are two options: - There are only values in the data buffer. - There is a separate non-categorical Column encoding for categorical values. Raises TypeError if the dtype is not categorical Returns the dictionary with description on how to interpret the data buffer: - \"is_ordered\" : bool, whether the ordering of dictionary indices is semantically meaningful. - \"is_dictionary\" : bool, whether a mapping of categorical values to other objects exists - \"categories\" : Column representing the (implicit) mapping of indices to category values (e.g. an array of cat1, cat2, ...). None if not a dictionary-style categorical. TBD: are there any other in-memory representations that are needed?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:describe_categorical arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "set_theta_offset",
    "source_code": "def set_theta_offset(self, offset):\n    mtx = self._theta_offset.get_matrix()\n    mtx[0, 2] = offset\n    self._theta_offset.invalidate()",
    "docstring": "Set the offset for the location of 0 in radians.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:set_theta_offset arg:self arg:offset arguments arg arg Assign Call Assign Call"
  },
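A short usage example on a polar Axes; offsetting by pi/2 puts 0 radians at the top of the plot:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> ax = plt.subplot(projection="polar")
>>> ax.set_theta_offset(np.pi / 2)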
  {
    "library": "tensorflow",
    "name": "_generate_signatures",
    "source_code": "def _generate_signatures(signature_functions: dict[str, Callable[..., Any]], object_map: object_identity.ObjectIdentityDictionary, defaults=None):\n    signatures = {}\n    for signature_key, function in sorted(signature_functions.items()):\n        if function.graph.captures:\n            argument_inputs = function.graph.inputs[:-len(function.graph.captures)]\n        else:\n            argument_inputs = function.graph.inputs\n        mapped_inputs, exterior_argument_placeholders = _map_function_arguments_to_created_inputs(argument_inputs, signature_key, function.name, defaults)\n        kwarg_names = list(sorted(object_map[function].function.structured_input_signature[1].keys()))\n        outputs = object_map[function](**{kwarg_name: mapped_input for kwarg_name, mapped_input in zip(kwarg_names, mapped_inputs)})\n        signatures[signature_key] = signature_def_utils.build_signature_def(_tensor_dict_to_tensorinfo(exterior_argument_placeholders), _tensor_dict_to_tensorinfo(outputs), method_name=signature_constants.PREDICT_METHOD_NAME, defaults=defaults.get(signature_key, None))\n    return signatures",
    "docstring": "Validates and calls in the exported graph. Args: signature_functions: A dictionary mapping string keys to concrete TensorFlow functions (e.g. from ) which will be used to generate SignatureDefs. object_map: A dictionary that contains mappings from signature functions to concrete functions in the exported graph. defaults: A dictionary mapping signature_key to dictionary of user_specified_name to Tensor representing default values. Returns: Each function in the dictionary is called with placeholder Tensors, generating a function call operation and output Tensors. The placeholder Tensors, the function call operation, and the output Tensors from the function call are part of the default Graph. This function then returns a dictionary with the same structure as , with the concrete functions replaced by SignatureDefs implicitly containing information about how to call each function from a TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference the generated placeholders and Tensor outputs by name. The caller is expected to include the default Graph set while calling this function as a MetaGraph in a SavedModel, including the returned SignatureDefs as part of that MetaGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_generate_signatures arg:signature_functions arg:object_map arg:defaults arguments arg arg arg Assign For Call Call If Assign Call Assign Assign Call Assign Call Call Call Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "normalize",
    "source_code": "def normalize(input: Tensor, p: float=2.0, dim: int=1, eps: float=1e-12, out: Optional[Tensor]=None) -> Tensor:\n    if has_torch_function_variadic(input, out):\n        return handle_torch_function(normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out)\n    if out is None:\n        denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input)\n        return input / denom\n    else:\n        denom = input.norm(p, dim, keepdim=True).clamp_min_(eps).expand_as(input)\n        return torch.div(input, denom, out=out)",
    "docstring": "Perform :math: normalization of inputs over specified dimension. For a tensor :attr: of sizes :math:, each :math: -element vector :math: along dimension :attr: is transformed as .. math:: v = \\frac{v}{\\max(\\lVert v \\rVert_p, \\epsilon)}. With the default arguments it uses the Euclidean norm over vectors along dimension :math: for normalization. Args: input: input tensor of any shape p (float): the exponent value in the norm formulation. Default: 2 dim (int or tuple of ints): the dimension to reduce. Default: 1 eps (float): small value to avoid division by zero. Default: 1e-12 out (Tensor, optional): the output tensor. If :attr: is used, this operation won't be differentiable.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:normalize arg:input arg:p arg:dim arg:eps arg:out arguments arg arg arg arg arg If Call Return return:yes Call If Compare Assign Call Call Call Return return:yes Assign Call Call Call Return return:yes Call"
  },
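Worked example: the vector (3, 4) has Euclidean norm 5, so normalizing along dim=1 gives (0.6, 0.8):
>>> import torch
>>> import torch.nn.functional as F
>>> F.normalize(torch.tensor([[3.0, 4.0]]), p=2.0, dim=1)
tensor([[0.6000, 0.8000]])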
  {
    "library": "authlib",
    "name": "create_bearer_token_generator",
    "source_code": "def create_bearer_token_generator(self):\n    conf = self.config.get('access_token_generator', True)\n    access_token_generator = create_token_generator(conf, 42)\n    conf = self.config.get('refresh_token_generator', False)\n    refresh_token_generator = create_token_generator(conf, 48)\n    conf = self.config.get('token_expires_in')\n    expires_generator = create_token_expires_in_generator(conf)\n    return BearerTokenGenerator(access_token_generator=access_token_generator, refresh_token_generator=refresh_token_generator, expires_generator=expires_generator)",
    "docstring": "Default method to create BearerToken generator.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\django_oauth2\\authorization_server.py",
    "ast_data": "FunctionDef name:create_bearer_token_generator arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_normalize_feature_columns",
    "source_code": "def _normalize_feature_columns(feature_columns):\n    if isinstance(feature_columns, fc_types.FeatureColumn):\n        feature_columns = [feature_columns]\n    if isinstance(feature_columns, collections_abc.Iterator):\n        feature_columns = list(feature_columns)\n    if isinstance(feature_columns, dict):\n        raise ValueError('Expected feature_columns to be iterable, found dict.')\n    for column in feature_columns:\n        if not isinstance(column, fc_types.FeatureColumn):\n            raise ValueError('Items of feature_columns must be a FeatureColumn. Given (type {}): {}.'.format(type(column), column))\n    if not feature_columns:\n        raise ValueError('feature_columns must not be empty.')\n    name_to_column = {}\n    for column in feature_columns:\n        if column.name in name_to_column:\n            raise ValueError('Duplicate feature column name found for columns: {} and {}. This usually means that these columns refer to same base feature. Either one must be discarded or a duplicated but renamed item must be inserted in features dict.'.format(column, name_to_column[column.name]))\n        name_to_column[column.name] = column\n    return sorted(feature_columns, key=lambda x: x.name)",
    "docstring": "Normalizes the input. This method converts the to list type as best as it can. In addition, verifies the type and other parts of feature_columns, required by downstream library. Args: feature_columns: The raw feature columns, usually passed by users. Returns: The normalized feature column list. Raises: ValueError: for any invalid inputs, such as empty, duplicated names, etc.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_normalize_feature_columns arg:feature_columns arguments arg If Call Assign If Call Assign Call If Call Raise Call For If Call Raise Call Call Call If Raise Call Assign For If Compare Raise Call Call Assign Return return:yes Call arguments arg"
  },
  {
    "library": "pytorch",
    "name": "transform_algebraic_expression",
    "source_code": "def transform_algebraic_expression(expr, counter, dimension_dict):\n    assert is_algebraic_expression(expr) or is_dim(expr)\n    if is_dim(expr):\n        transformed, counter = transform_dimension(expr, counter, dimension_dict)\n        return (transformed.arg(1), counter)\n    elif isinstance(expr, Prod):\n        dims = []\n        for dim in expr.products:\n            assert is_dim(dim)\n            d, counter = transform_dimension(dim, counter, dimension_dict)\n            dims.append(d.arg(1))\n        return (z3.Product(dims), counter)\n    elif is_algebraic_expression(expr):\n        lhs, counter = transform_algebraic_expression(expr.lhs, counter, dimension_dict)\n        rhs, counter = transform_algebraic_expression(expr.rhs, counter, dimension_dict)\n        if expr.op == op_sub:\n            c = lhs - rhs\n        elif expr.op == op_add:\n            c = lhs + rhs\n        elif expr.op == op_div:\n            c = lhs / rhs\n        elif expr.op == op_mul:\n            c = lhs * rhs\n        elif expr.op == op_mod:\n            c = lhs % rhs\n        else:\n            raise NotImplementedError('operation not yet implemented')\n        return (c, counter)\n    else:\n        raise RuntimeError",
    "docstring": "Transforms an algebraic expression to z3 format Args: expr: An expression is either a dimension variable or an algebraic-expression Returns: the transformed expression",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py",
    "ast_data": "FunctionDef name:transform_algebraic_expression arg:expr arg:counter arg:dimension_dict arguments arg arg arg BoolOp Call Call If Call Assign Call Return return:yes Call If Call Assign For Call Assign Call Call Call Return return:yes Call If Call Assign Call Assign Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Raise Call Return return:yes Raise"
  },
  {
    "library": "django",
    "name": "pixel_count",
    "source_code": "@property\ndef pixel_count(self):\n    return self.width * self.height",
    "docstring": "Return the total number of pixels in this band.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\band.py",
    "ast_data": "FunctionDef name:pixel_count arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "encode",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_code_dispatcher)\ndef encode(a, encoding=None, errors=None):\n    return _to_bytes_or_str_array(_vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), np.bytes_(b''))",
    "docstring": "Calls :meth: element-wise. The set of available codecs comes from the Python standard library, and may be extended at runtime. For more information, see the :mod: module. Parameters ---------- a : array_like, with `` dtype encoding : str, optional The name of an encoding errors : str, optional Specifies how to handle encoding errors Returns ------- out : ndarray See Also -------- str.encode Notes ----- The type of the result will depend on the encoding specified. Examples -------- >>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.encode(a, encoding='cp037') array([b'ÁÁÁ', b'@@Á@@', b'ÂÁÂ'], dtype='|S7')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:encode arg:a arg:encoding arg:errors arguments arg arg arg Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "uvicorn",
    "name": "pause_writing",
    "source_code": "def pause_writing(self) -> None:\n    self.flow.pause_writing()",
    "docstring": "Called by the transport when the write buffer exceeds the high water mark.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\httptools_impl.py",
    "ast_data": "FunctionDef name:pause_writing arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "str2bool",
    "source_code": "def str2bool(v):\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')",
    "docstring": "ArgumentParser doesn't support type=bool. Thus, this helper method will convert from possible string types to True / False.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:str2bool arg:v arguments arg If Compare Call Return return:yes If Compare Call Return return:yes Raise Call"
  },
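A minimal sketch of how `str2bool` is typically wired into `argparse`; the parser and the `--verbose` flag are hypothetical, only `str2bool` comes from the entry above.

```python
import argparse

# Hypothetical parser; `str2bool` is the helper defined in the entry above.
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=str2bool, default=False,
                    help="accepts yes/no, true/false, t/f, y/n, 1/0")

args = parser.parse_args(["--verbose", "yes"])
assert args.verbose is True
```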
  {
    "library": "scipy",
    "name": "tck",
    "source_code": "@property\ndef tck(self):\n    return (self.t, self.c, self.k)",
    "docstring": "Equivalent to `` (read-only).",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:tck arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, eigs, seed=None, tol=1e-13, diag_tol=1e-07):\n    return random_correlation_frozen(eigs, seed=seed, tol=tol, diag_tol=diag_tol)",
    "docstring": "Create a frozen random correlation matrix. See for more information.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:eigs arg:seed arg:tol arg:diag_tol arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_SaveDataset",
    "source_code": "class _SaveDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, dataset, path, shard_func, compression):\n        self._element_spec = dataset.element_spec\n        self._shard_func = shard_func\n        dataset, shard_func, use_shard_func, path = set_save_dataset_attributes(dataset, shard_func, path)\n        variant_tensor = ged_ops.save_dataset_v2(dataset._variant_tensor, path=path, shard_func_other_args=shard_func.captured_inputs, shard_func=shard_func, use_shard_func=use_shard_func, compression=compression, output_types=structure.get_flat_tensor_types(dataset.element_spec), output_shapes=structure.get_flat_tensor_shapes(dataset.element_spec))\n        super().__init__(dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._shard_func]\n\n    @property\n    def element_spec(self):\n        return self._element_spec",
    "docstring": "\"A dataset that loads previously saved dataset.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\save_op.py",
    "ast_data": "ClassDef name:_SaveDataset FunctionDef name:__init__ arg:self arg:dataset arg:path arg:shard_func arg:compression arguments arg arg arg arg arg Assign Assign Assign Call Assign Call Call Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "drop_duplicates",
    "source_code": "def drop_duplicates(self, *, keep: DropKeep='first', inplace: bool=False, ignore_index: bool=False) -> Series | None:\n    inplace = validate_bool_kwarg(inplace, 'inplace')\n    result = super().drop_duplicates(keep=keep)\n    if ignore_index:\n        result.index = default_index(len(result))\n    if inplace:\n        self._update_inplace(result)\n        return None\n    else:\n        return result",
    "docstring": "Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', `` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:drop_duplicates arg:self arguments arg arg arg arg Assign Call Assign Call Call If Assign Call Call If Call Return return:no Return return:yes"
  },
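A short usage sketch of the three keyword arguments documented above; the example Series is made up.

```python
import pandas as pd

s = pd.Series(["cow", "lama", "cow", "beetle", "lama", "hippo"], name="animal")

print(s.drop_duplicates())                   # keep='first': first occurrence survives
print(s.drop_duplicates(keep=False))         # drop every value that repeats at all
print(s.drop_duplicates(ignore_index=True))  # relabel the result with a RangeIndex
```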
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.FixedLengthRecordDataset`.')\ndef __init__(self, record_bytes, header_bytes=None, footer_bytes=None, hop_bytes=None, name=None, encoding=None):\n    rr = gen_io_ops.fixed_length_record_reader_v2(record_bytes=record_bytes, header_bytes=header_bytes, footer_bytes=footer_bytes, hop_bytes=hop_bytes, encoding=encoding, name=name)\n    super(FixedLengthRecordReader, self).__init__(rr)",
    "docstring": "Create a FixedLengthRecordReader. Args: record_bytes: An int. header_bytes: An optional int. Defaults to 0. footer_bytes: An optional int. Defaults to 0. hop_bytes: An optional int. Defaults to 0. name: A name for the operation (optional). encoding: The type of encoding for the file. Defaults to none.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:record_bytes arg:header_bytes arg:footer_bytes arg:hop_bytes arg:name arg:encoding arguments arg arg arg arg arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "django",
    "name": "get_geoms",
    "source_code": "def get_geoms(self, geos=False):\n    if geos:\n        from django.contrib.gis.geos import GEOSGeometry\n        return [GEOSGeometry(feat.geom.wkb) for feat in self]\n    else:\n        return [feat.geom for feat in self]",
    "docstring": "Return a list containing the OGRGeometry for every Feature in the Layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:get_geoms arg:self arg:geos arguments arg arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "django",
    "name": "output_field",
    "source_code": "@cached_property\ndef output_field(self):\n    output_field = self._resolve_output_field()\n    if output_field is None:\n        raise OutputFieldIsNoneError('Cannot resolve expression type, unknown output_field')\n    return output_field",
    "docstring": "Return the output type of this expressions.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:output_field arg:self arguments arg Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "BackendFilter",
    "source_code": "class BackendFilter(Enum):\n    INTERACTIVE = 0\n    NON_INTERACTIVE = 1",
    "docstring": "Filter used with :meth: .. versionadded:: 3.9",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "ClassDef name:BackendFilter Assign Assign"
  },
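Assuming this enum is consumed by `BackendRegistry.list_builtin` (the target the truncated :meth: reference appears to point at), a usage sketch could look like:

```python
from matplotlib.backends.registry import BackendFilter, backend_registry

# Requires Matplotlib >= 3.9; filter built-in backends by interactivity.
print(backend_registry.list_builtin(BackendFilter.INTERACTIVE))
print(backend_registry.list_builtin(BackendFilter.NON_INTERACTIVE))
```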
  {
    "library": "pytorch",
    "name": "float8_e5m2",
    "source_code": "def float8_e5m2(self):\n    return self._to(torch.float8_e5m2)",
    "docstring": "Casts this storage to float8_e5m2 type",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:float8_e5m2 arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "right_jacobian",
    "source_code": "@staticmethod\ndef right_jacobian(vec: Tensor) -> Tensor:\n    R_skew = vector_to_skew_symmetric_matrix(vec)\n    theta = vec.norm(dim=-1, keepdim=True)[..., None]\n    I = eye(3, device=vec.device, dtype=vec.dtype)\n    Jr = I - (1 - theta.cos()) / theta ** 2 * R_skew + (theta - theta.sin()) / theta ** 3 * (R_skew @ R_skew)\n    return Jr",
    "docstring": "Compute the right Jacobian of So3. Args: vec: the input point of shape :math:. Example: >>> vec = torch.tensor([1., 2., 3.]) >>> So3.right_jacobian(vec) tensor([[-0.0687, 0.5556, -0.0141], [-0.2267, 0.1779, 0.6236], [ 0.5074, 0.3629, 0.5890]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:right_jacobian arg:vec arguments arg Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get_underline",
    "source_code": "def get_underline(self):\n    return self.underline",
    "docstring": "get_underline() -> bool check if the text will be rendered with an underline",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:get_underline arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "FieldError",
    "source_code": "class FieldError(Exception):\n    pass",
    "docstring": "Some kind of problem with a model field.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:FieldError"
  },
  {
    "library": "pytorch",
    "name": "move_to_device_pass",
    "source_code": "def move_to_device_pass(ep: ExportedProgram, location: Union[torch.device, str, dict[str, str]]) -> ExportedProgram:\n\n    def _get_new_device(curr_device: torch.device, location: Union[torch.device, str, dict[str, str]]) -> str:\n        if isinstance(location, dict):\n            if str(curr_device) in location.keys():\n                return location[str(curr_device)]\n            else:\n                return str(curr_device)\n        else:\n            return str(location)\n    for k, v in ep.state_dict.items():\n        if isinstance(v, torch.nn.Parameter):\n            ep._state_dict[k] = torch.nn.Parameter(v.to(_get_new_device(v.device, location)), v.requires_grad)\n        else:\n            ep._state_dict[k] = v.to(_get_new_device(v.device, location))\n    for k, v in ep.constants.items():\n        if isinstance(v, torch.Tensor):\n            ep._constants[k] = v.to(_get_new_device(v.device, location))\n    for node in ep.graph.nodes:\n        if 'device' in node.kwargs:\n            kwargs = node.kwargs.copy()\n            kwargs['device'] = _get_new_device(kwargs['device'], location)\n            node.kwargs = kwargs\n        node.meta['val'] = pytree.tree_map(lambda v: v.to(_get_new_device(v.device, location)) if isinstance(v, torch.Tensor) else v, node.meta.get('val'))\n    ep.validate()\n    return ep",
    "docstring": "Move the exported program to the given device. Args: ep (ExportedProgram): The exported program to move. location (Union[torch.device, str, Dict[str, str]]): The device to move the exported program to. If a string, it is interpreted as a device name. If a dict, it is interpreted as a mapping from the existing device to the intended one Returns: ExportedProgram: The moved exported program.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\passes\\__init__.py",
    "ast_data": "FunctionDef name:move_to_device_pass arg:ep arg:location arguments arg arg FunctionDef name:_get_new_device arg:curr_device arg:location arguments arg arg If Call If Compare Call Call Return return:yes Call Return return:yes Call Return return:yes Call For Call If Call Assign Call Call Call Assign Call Call For Call If Call Assign Call Call For If Compare Assign Call Assign Call Assign Assign Call arguments arg Call Call Call Call Call Return return:yes"
  },
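A minimal sketch of the pass in use, assuming a recent `torch.export` API; the module `M` is hypothetical.

```python
import torch
from torch.export import export
from torch.export.passes import move_to_device_pass

class M(torch.nn.Module):  # hypothetical module
    def forward(self, x):
        return x + 1

ep = export(M(), (torch.randn(2),))

# Move all parameters, constants, and device kwargs to CPU; a dict such as
# {"cuda:0": "cuda:1"} would instead remap only matching devices.
ep_cpu = move_to_device_pass(ep, "cpu")
```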
  {
    "library": "pytorch",
    "name": "_get_abs_string_index",
    "source_code": "def _get_abs_string_index(self, idx):\n    idx = operator.index(idx)\n    if not -len(self) <= idx < len(self):\n        raise IndexError(f'index {idx} is out of range')\n    if idx < 0:\n        idx += len(self)\n    return str(idx)",
    "docstring": "Get the absolute index for the list of modules.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:_get_abs_string_index arg:self arg:idx arguments arg arg Assign Call If Compare Call Call Raise Call If Compare Call Return return:yes Call"
  },
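The normalization logic restated as a standalone sketch; the helper name and the free `length` parameter are ours, not PyTorch's.

```python
import operator

def abs_string_index(idx: int, length: int) -> str:
    idx = operator.index(idx)           # rejects non-integral indices
    if not -length <= idx < length:     # same bounds rule as list indexing
        raise IndexError(f"index {idx} is out of range")
    if idx < 0:
        idx += length                   # fold negative indices to absolute ones
    return str(idx)                     # container modules key children by string

assert abs_string_index(-1, 5) == "4"
assert abs_string_index(2, 5) == "2"
```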
  {
    "library": "matplotlib",
    "name": "set_fontname",
    "source_code": "def set_fontname(self, fontname):\n    self.set_fontfamily(fontname)",
    "docstring": "Alias for . One-way alias only: the getter differs. Parameters ---------- fontname : {FONTNAME, 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'} See Also -------- .font_manager.FontProperties.set_family",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_fontname arg:self arg:fontname arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "hasnans",
    "source_code": "@cache_readonly\ndef hasnans(self) -> bool:\n    return bool(isna(self).any())",
    "docstring": "Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool See Also -------- Series.isna : Detect missing values. Series.notna : Detect existing (non-missing) values. Examples -------- >>> s = pd.Series([1, 2, 3, None]) >>> s 0 1.0 1 2.0 2 3.0 3 NaN dtype: float64 >>> s.hasnans True",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:hasnans arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce",
    "source_code": "def reduce(self, fn, *args):\n    assert not context.executing_eagerly()\n    tensor_specs = []\n    for arg in args:\n        if not isinstance(arg, tensor_lib.Tensor):\n            raise ValueError(f'Got a non-Tensor argument {arg} in reduce.')\n        batched_shape = tensor_shape.TensorShape([self._maybe_iters]).concatenate(arg.shape)\n        tensor_specs.append(tensor_lib.TensorSpec(shape=batched_shape, dtype=arg.dtype))\n    concrete_function = def_function.function(fn).get_concrete_function(*tensor_specs)\n    pl_outputs = []\n    with ops.control_dependencies(args):\n        for output in concrete_function.outputs:\n            if not isinstance(output, tensor_lib.Tensor):\n                raise ValueError(f'Got a non-Tensor output {output} while running reduce.')\n            if output.shape.is_fully_defined():\n                dummy = array_ops.zeros(output.shape.as_list(), dtype=output.dtype)\n                pl_outputs.append(array_ops.placeholder_with_default(dummy, shape=output.shape))\n            else:\n                pl_outputs.append(array_ops.placeholder(output.dtype, shape=output.shape))\n        reduction_op = array_ops.identity_n(pl_outputs)[0].op\n    self._reduce_map[reduction_op] = (concrete_function, args)\n    if len(reduction_op.outputs) == 1:\n        return reduction_op.outputs[0]\n    else:\n        return tuple(reduction_op.outputs)",
    "docstring": "Performs reduction on vectorized across pfor iterations. Note that is traced once inside the loop function context. Hence any captures or side-effects will happen in that context. Call to the traced version of happens during the construction of the vectorized code. Note that this currently may not work inside a control flow construct. Args: fn: a reduction function. It will be called with arguments that have the same structure as *args but with individual values whose rank may be higher by 1 since they represent loop invariant vectorized versions of the corresponding Tensors in *args. *args: unvectorized Tensors. Returns: The result of running on the vectorized versions of . These outputs will be available as loop invariant values to all the iterations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:fn arguments arg arg arg Call Assign For If Call Raise Call Assign Call Call Call Call Assign Call Call Assign With Call For If Call Raise Call If Call Assign Call Call Call Call Call Call Assign Call Assign If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_Write",
    "source_code": "def _Write(self, data, output_file):\n    _, extension = os.path.splitext(output_file)\n    with TemporaryDirectoryResource() as tempdir:\n        if extension == '.json':\n            json.dump(data, open(output_file, 'w'), sort_keys=True, indent=2)\n        elif extension in ['.tflite', '.bin']:\n            input_json = os.path.join(tempdir, 'temp.json')\n            with open(input_json, 'w') as fp:\n                json.dump(data, fp, sort_keys=True, indent=2)\n            returncode = subprocess.call([self._flatc_path, '-b', '--defaults-json', '--strict-json', '-o', tempdir, self._new_schema, input_json])\n            if returncode != 0:\n                raise RuntimeError('flatc failed to convert upgraded json to binary.')\n            shutil.copy(os.path.join(tempdir, 'temp.tflite'), output_file)\n        else:\n            raise ValueError('Invalid extension on output file %r' % output_file)",
    "docstring": "Output a json or bin version of the flatbuffer model. Args: data: Dict representing the TensorFlow Lite model to write. output_file: filename to write the converted flatbuffer to. (json, tflite, or bin extension is required). Raises: ValueError: When the extension is not json or bin RuntimeError: When flatc fails to convert json data to binary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\schema\\upgrade_schema.py",
    "ast_data": "FunctionDef name:_Write arg:self arg:data arg:output_file arguments arg arg arg Assign Call With Call If Compare Call Call If Compare Assign Call With Call Call Assign Call If Compare Raise Call Call Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_broadcasting_binary_op",
    "source_code": "def _broadcasting_binary_op(fn):\n\n    def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):\n        broadcast_dims = broadcast_dims or []\n        broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)\n        x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)\n        return fn(x, y, name=name)\n    return broadcasting_binary_op_wrapper",
    "docstring": "Wraps a binary Tensorflow operator and performs XLA-style broadcasting.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:_broadcasting_binary_op arg:fn arguments arg FunctionDef name:broadcasting_binary_op_wrapper arg:x arg:y arg:broadcast_dims arg:name arguments arg arg arg arg Assign BoolOp Assign Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_dump_onnx_model",
    "source_code": "def _dump_onnx_model(model_string: bytes, graph_module: Optional[torch.fx.GraphModule]=None) -> str:\n    prefix = os.environ.get('ONNXRT_DUMP_PATH', None)\n    if not prefix:\n        return ''\n    n = _dumped_onnx_model.get(prefix, -1) + 1\n    filename = f'{prefix}{n}.onnx'\n    with open(filename, 'wb') as f:\n        f.write(model_string)\n    _dumped_onnx_model[prefix] = n\n    if graph_module is not None:\n        filename_txt = f'{prefix}{n}.txt'\n        with open(filename_txt, 'w', encoding='utf-8') as f:\n            f.write(str(graph_module.graph))\n    return filename",
    "docstring": "Stores the onnx model into a file. The name is \"{ONNXRT_DUMP_PATH}{N}.onnx\" where *N* is the number of files already stored with this prefix. If graph_module is not None, the graph is stored as a string with the same filename except the extension (.txt).",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "FunctionDef name:_dump_onnx_model arg:model_string arg:graph_module arguments arg arg Assign Call If Return return:yes Assign Call Assign With Call Call Assign If Compare Assign With Call Call Call Return return:yes"
  },
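The function is driven entirely by the ONNXRT_DUMP_PATH environment variable; a hypothetical session using it might look like:

```python
import os

# With a prefix set, each serialized model lands at {prefix}{N}.onnx, and a
# matching {prefix}{N}.txt holds the FX graph when one is available.
os.environ["ONNXRT_DUMP_PATH"] = "/tmp/dump_"

# ... run a model through the onnxrt backend here, e.g.
# torch.compile(model, backend="onnxrt"); files then appear as
# /tmp/dump_0.onnx, /tmp/dump_1.onnx, ...
```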
  {
    "library": "sphinx",
    "name": "app",
    "source_code": "@property\ndef app(self) -> Sphinx:\n    return self.env.app",
    "docstring": "Reference to the :class: object.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "FunctionDef name:app arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_py_list_stack",
    "source_code": "def _py_list_stack(list_, opts):\n    return opts.original_call(list_)",
    "docstring": "Overload of list_stack that executes a Python list append.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:_py_list_stack arg:list_ arg:opts arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "schedule_comm_wait",
    "source_code": "def schedule_comm_wait(graph: fx.Graph) -> None:\n    ops = (torch.ops._c10d_functional.all_reduce_.default, torch.ops._c10d_functional.all_reduce.default, torch.ops._c10d_functional.all_reduce_coalesced.default, torch.ops._c10d_functional.all_reduce_coalesced_.default)\n    comm_blocks = get_all_comm_blocks(graph, ops)\n    if not comm_blocks:\n        return\n    allreduce_users = OrderedSet[fx.Node]()\n    for allreduce in comm_blocks:\n        for output in allreduce.outputs:\n            allreduce_users.update(output.users)\n    node_indices = {node: i for i, node in enumerate(graph.nodes)}\n    for allreduce in comm_blocks:\n        assert len(allreduce.outputs) >= 1, f'Found a allreduce that has zero outputs/users -- {allreduce}.'\n        target_node = next(iter(next(iter(allreduce.outputs)).users))\n        target_node_index = 2 ** 31\n        for user in (user for output in allreduce.outputs for user in output.users):\n            index = node_indices[user]\n            if index < target_node_index:\n                target_node = user\n                target_node_index = index\n        wait_idx = -1\n        for wait_idx, node in enumerate(allreduce.node_list):\n            if node == allreduce.wait_nodes[0]:\n                break\n        assert wait_idx >= 0\n        move_block_before(allreduce.node_list[wait_idx:], target_node)",
    "docstring": "Delay the execution of wait tensors of allreduce until its first user. This algorithm considers the intermediate users, like split, getitem, of the wait node and schedule those intermediate users as well. This will result in a better overlapping result.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\ddp_fusion.py",
    "ast_data": "FunctionDef name:schedule_comm_wait arg:graph arguments arg Assign Assign Call If Return return:no Assign Call For For Call Assign Call For Compare Call Assign Call Call Call Call Assign For Assign If Compare Assign Assign Assign For Call If Compare Compare Call"
  },
  {
    "library": "authlib",
    "name": "create_bearer_token_generator",
    "source_code": "def create_bearer_token_generator(self, config):\n    conf = config.get('OAUTH2_ACCESS_TOKEN_GENERATOR', True)\n    access_token_generator = create_token_generator(conf, 42)\n    conf = config.get('OAUTH2_REFRESH_TOKEN_GENERATOR', False)\n    refresh_token_generator = create_token_generator(conf, 48)\n    expires_conf = config.get('OAUTH2_TOKEN_EXPIRES_IN')\n    expires_generator = create_token_expires_in_generator(expires_conf)\n    return BearerTokenGenerator(access_token_generator, refresh_token_generator, expires_generator)",
    "docstring": "Create a generator function for generating `authlib.oauth2.rfc6750.BearerTokenyour_project.generators`:: OAUTH2_TOKEN_EXPIRES_IN = { \"authorization_code\": 864000, \"urn:ietf:params:oauth:grant-type:jwt-bearer\": 3600, }",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\flask_oauth2\\authorization_server.py",
    "ast_data": "FunctionDef name:create_bearer_token_generator arg:self arg:config arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_insert_freq",
    "source_code": "def _get_insert_freq(self, loc: int, item):\n    value = self._data._validate_scalar(item)\n    item = self._data._box_func(value)\n    freq = None\n    if self.freq is not None:\n        if self.size:\n            if item is NaT:\n                pass\n            elif loc in (0, -len(self)) and item + self.freq == self[0]:\n                freq = self.freq\n            elif loc == len(self) and item - self.freq == self[-1]:\n                freq = self.freq\n        elif isinstance(self.freq, Tick):\n            freq = self.freq\n        elif self.freq.is_on_offset(item):\n            freq = self.freq\n    return freq",
    "docstring": "Find the for self.insert(loc, item).",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimelike.py",
    "ast_data": "FunctionDef name:_get_insert_freq arg:self arg:loc arg:item arguments arg arg arg Assign Call Assign Call Assign If Compare If If Compare If BoolOp Compare Call Compare Assign If BoolOp Compare Call Compare Assign If Call Assign If Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "BinConstraintT",
    "source_code": "class BinConstraintT(BinaryConstraint):\n\n    def __init__(self, lhs, rhs, op):\n        assert (isinstance(lhs, (TVar, TensorType, int)) or lhs == Dyn) and (isinstance(rhs, (TVar, TensorType, int)) or rhs == Dyn)\n        super().__init__(lhs, rhs, op)\n\n    def __eq__(self, other):\n        return super().__eq__(other)",
    "docstring": "Binary constraints about tensors",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:BinConstraintT FunctionDef name:__init__ arg:self arg:lhs arg:rhs arg:op arguments arg arg arg arg BoolOp BoolOp Call Compare BoolOp Call Compare Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_make_complex_eigvecs",
    "source_code": "def _make_complex_eigvecs(w, vin, dtype):\n    v = np.array(vin, dtype=dtype)\n    m = w.imag > 0\n    m[:-1] |= w.imag[1:] < 0\n    for i in flatnonzero(m):\n        v.imag[:, i] = vin[:, i + 1]\n        conj(v[:, i], v[:, i + 1])\n    return v",
    "docstring": "Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_decomp.py",
    "ast_data": "FunctionDef name:_make_complex_eigvecs arg:w arg:vin arg:dtype arguments arg arg arg Assign Call Assign Compare Compare For Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "experimental_payloads",
    "source_code": "@property\ndef experimental_payloads(self):\n    return self._experimental_payloads",
    "docstring": "A dictionary describing the details of the error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "FunctionDef name:experimental_payloads arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "process_bounds",
    "source_code": "def process_bounds(bounds, lenx0):\n    if bounds is None:\n        lb = np.array([-np.inf] * lenx0, dtype=np.float64)\n        ub = np.array([np.inf] * lenx0, dtype=np.float64)\n        return (lb, ub)\n    if isinstance(bounds, Bounds):\n        lb = np.array(bounds.lb, dtype=np.float64)\n        ub = np.array(bounds.ub, dtype=np.float64)\n        lb = np.concatenate((lb, -np.inf * np.ones(lenx0 - len(lb))))\n        ub = np.concatenate((ub, np.inf * np.ones(lenx0 - len(ub))))\n        return (lb, ub)\n    lb = np.array([bound[0] if bound[0] is not None else -np.inf for bound in bounds], dtype=np.float64)\n    ub = np.array([bound[1] if bound[1] is not None else np.inf for bound in bounds], dtype=np.float64)\n    lb = np.concatenate((lb, -np.inf * np.ones(lenx0 - len(lb))))\n    ub = np.concatenate((ub, np.inf * np.ones(lenx0 - len(ub))))\n    return (lb, ub)",
    "docstring": "can either be an object with the properties lb and ub, or a list of tuples indicating a lower bound and an upper bound for each variable. If the list contains fewer entries than the length of x0, the remaining entries will generated as -/+ infinity. Some examples of valid lists of tuple, assuming len(x0) == 3: [(0, 1), (2, 3), (4, 5)] -> returns [0, 2, 4], [1, 3, 5] [(0, 1), (None, 3)] -> returns [0, -inf, -inf], [1, 3, inf] [(0, 1), (-np.inf, 3)] -> returns [0, -inf, -inf], [1, 3, inf]",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\_bounds.py",
    "ast_data": "FunctionDef name:process_bounds arg:bounds arg:lenx0 arguments arg arg If Compare Assign Call Assign Call Return return:yes If Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Assign Call Compare Assign Call Compare Assign Call Call Call Assign Call Call Call Return return:yes"
  },
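Calling the function directly on the docstring's own examples (a sketch; in the library this helper is internal to pyprima):

```python
import numpy as np
from scipy.optimize import Bounds

lb, ub = process_bounds([(0, 1), (None, 3)], 3)
assert np.array_equal(lb, [0, -np.inf, -np.inf])
assert np.array_equal(ub, [1, 3, np.inf])

# A Bounds object shorter than x0 is padded with infinities the same way.
lb, ub = process_bounds(Bounds([0.0], [1.0]), 3)
```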
  {
    "library": "tensorflow",
    "name": "categorical_column_with_hash_bucket",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.categorical_column_with_hash_bucket')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef categorical_column_with_hash_bucket(key, hash_bucket_size, dtype=dtypes.string):\n    if hash_bucket_size is None:\n        raise ValueError('hash_bucket_size must be set. key: {}'.format(key))\n    if hash_bucket_size < 1:\n        raise ValueError('hash_bucket_size must be at least 1. hash_bucket_size: {}, key: {}'.format(hash_bucket_size, key))\n    fc_utils.assert_key_is_string(key)\n    fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))\n    return HashedCategoricalColumn(key, hash_bucket_size, dtype)",
    "docstring": "Represents sparse feature where ids are set by hashing. Use this when your sparse features are in string or integer format, and you want to distribute your inputs into a finite number of buckets by hashing. output_id = Hash(input_feature_string) % bucket_size for string type input. For int type input, the value is converted to its string representation first and then hashed by the same formula. For input dictionary , is either or . If , missing values can be represented by for int and for string, which will be dropped by this feature column. Example: Args: key: A unique string identifying the input feature. It is used as the column name and the dictionary key for feature parsing configs, feature objects, and feature columns. hash_bucket_size: An int > 1. The number of buckets. dtype: The type of features. Only string and integer types are supported. Returns: A . Raises: ValueError: is not greater than 1. ValueError: is neither string nor integer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:categorical_column_with_hash_bucket arg:key arg:hash_bucket_size arg:dtype arguments arg arg arg If Compare Raise Call Call If Compare Raise Call Call Call Call Call Return return:yes Call Call Call Call"
  },
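A usage sketch of the (deprecated) column; the feature name and bucket count are made up, and the embedding wrapper is just one common way to consume the column.

```python
import tensorflow as tf

keywords = tf.feature_column.categorical_column_with_hash_bucket(
    key="keywords", hash_bucket_size=10000, dtype=tf.string)

# Categorical columns are usually wrapped before being fed to a model.
keywords_embedded = tf.feature_column.embedding_column(keywords, dimension=16)
```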
  {
    "library": "matplotlib",
    "name": "Stroke",
    "source_code": "class Stroke(AbstractPathEffect):\n\n    def __init__(self, offset=(0, 0), **kwargs):\n        super().__init__(offset)\n        self._gc = kwargs\n\n    def draw_path(self, renderer, gc, tpath, affine, rgbFace):\n        gc0 = renderer.new_gc()\n        gc0.copy_properties(gc)\n        gc0 = self._update_gc(gc0, self._gc)\n        renderer.draw_path(gc0, tpath, affine + self._offset_transform(renderer), rgbFace)\n        gc0.restore()",
    "docstring": "A line based PathEffect which re-draws a stroke.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py",
    "ast_data": "ClassDef name:Stroke FunctionDef name:__init__ arg:self arg:offset arguments arg arg arg Call Call Assign FunctionDef name:draw_path arg:self arg:renderer arg:gc arg:tpath arg:affine arg:rgbFace arguments arg arg arg arg arg arg Assign Call Call Assign Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "watch",
    "source_code": "def watch(obj: Any, guarded_code: Any) -> None:\n    ensure_patched(type(obj))\n    if obj not in MutationTracker.db:\n        MutationTracker.db[obj] = MutationTracker()\n    tracker = MutationTracker.db[obj]\n    tracker.track(guarded_code)",
    "docstring": "invalidate guarded_code when obj is mutated",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\mutation_guard.py",
    "ast_data": "FunctionDef name:watch arg:obj arg:guarded_code arguments arg arg Call Call If Compare Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "reverse",
    "source_code": "def reverse(self):\n    if self.query.is_sliced:\n        raise TypeError('Cannot reverse a query once a slice has been taken.')\n    clone = self._chain()\n    clone.query.standard_ordering = not clone.query.standard_ordering\n    return clone",
    "docstring": "Reverse the ordering of the QuerySet.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:reverse arg:self arguments arg If Raise Call Assign Call Assign Return return:yes"
  },
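A sketch with a hypothetical model showing the usual idiom, taking the last n rows of an ordered queryset; the slice must come after reverse(), since reversing a sliced queryset raises TypeError per the code above.

```python
from django.db import models

class Entry(models.Model):  # hypothetical model
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        ordering = ["pub_date"]

# Last five entries by pub_date: flip the default ordering, then slice.
latest = Entry.objects.reverse()[:5]
```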
  {
    "library": "tensorflow",
    "name": "eval_dir",
    "source_code": "def eval_dir(model_dir, name=None):\n    return os.path.join(model_dir, 'eval' if not name else 'eval_' + name)",
    "docstring": "Construct a logdir for an eval summary writer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:eval_dir arg:model_dir arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "angular_units",
    "source_code": "@property\ndef angular_units(self):\n    return self.srs.angular_units",
    "docstring": "Return the angular units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py",
    "ast_data": "FunctionDef name:angular_units arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "inside_box_boundaries",
    "source_code": "def inside_box_boundaries(x, lb, ub):\n    return (lb <= x).all() and (x <= ub).all()",
    "docstring": "Check if lb <= x <= ub.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py",
    "ast_data": "FunctionDef name:inside_box_boundaries arg:x arg:lb arg:ub arguments arg arg arg Return return:yes BoolOp Call Compare Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "captures",
    "source_code": "@property\ndef captures(self):\n    return [(k.deref(), v) for k, v in self._captured.items()]",
    "docstring": "Pairs of tensors and captured tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:captures arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_keypoint",
    "source_code": "def apply_non_transform_keypoint(self, input: Keypoints, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Keypoints:\n    return input",
    "docstring": "Process keypoints corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_keypoint arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_finalize_rasterization",
    "source_code": "def _finalize_rasterization(draw):\n\n    @wraps(draw)\n    def draw_wrapper(artist, renderer, *args, **kwargs):\n        result = draw(artist, renderer, *args, **kwargs)\n        if renderer._rasterizing:\n            renderer.stop_rasterizing()\n            renderer._rasterizing = False\n        return result\n    return draw_wrapper",
    "docstring": "Decorator for Artist.draw method. Needed on the outermost artist, i.e. Figure, to finish up if the render is still in rasterized mode.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:_finalize_rasterization arg:draw arguments arg FunctionDef name:draw_wrapper arg:artist arg:renderer arguments arg arg arg arg Assign Call If Call Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "test_parallel",
    "source_code": "def test_parallel(num_threads=2, kwargs_list=None):\n    assert num_threads > 0\n    has_kwargs_list = kwargs_list is not None\n    if has_kwargs_list:\n        assert len(kwargs_list) == num_threads\n\n    def wrapper(func):\n\n        @wraps(func)\n        def inner(*args, **kwargs):\n            if has_kwargs_list:\n                update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])\n            else:\n                update_kwargs = lambda i: kwargs\n            threads = []\n            for i in range(num_threads):\n                updated_kwargs = update_kwargs(i)\n                thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)\n                threads.append(thread)\n            for thread in threads:\n                thread.start()\n            for thread in threads:\n                thread.join()\n        return inner\n    return wrapper",
    "docstring": "Decorator to run the same function multiple times in parallel. Parameters ---------- num_threads : int, optional The number of times the function is run in parallel. kwargs_list : list of dicts, optional The list of kwargs to update original function kwargs on different threads. Notes ----- This decorator does not pass the return value of the decorated function. Original from scikit-image:",
    "type": "function",
    "file_path": "pandas\\asv_bench\\benchmarks\\gil.py",
    "ast_data": "FunctionDef name:test_parallel arg:num_threads arg:kwargs_list arguments arg arg Compare Assign Compare If Compare Call FunctionDef name:wrapper arg:func arguments arg FunctionDef name:inner arguments arg arg If Assign arguments arg Call Assign arguments arg Assign For Call Assign Call Assign Call Call For Call For Call Call Return return:yes Return return:yes"
  },
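A sketch of the decorator in use; the decorated function and the shared list are made up. Note the docstring's caveat: return values are discarded, so results must flow through side effects.

```python
import threading

results = []

@test_parallel(num_threads=2)
def record_thread_id():  # hypothetical workload
    results.append(threading.get_ident())

record_thread_id()          # runs the body on two threads and joins both
assert len(results) == 2
```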
  {
    "library": "scikit-learn",
    "name": "attach_unique",
    "source_code": "def attach_unique(*ys, return_tuple=False):\n    res = tuple((_attach_unique(y) for y in ys))\n    if len(res) == 1 and (not return_tuple):\n        return res[0]\n    return res",
    "docstring": "Attach unique values of ys to ys and return the results. The result is a view of y, and the metadata (unique) is not attached to y. IMPORTANT: The output of this function should NEVER be returned in functions. This is to avoid this pattern: .. code:: python y = np.array([1, 2, 3]) y = attach_unique(y) y[1] = -1 # now np.unique(y) will be different from cached_unique(y) Parameters ---------- *ys : sequence of array-like Input data arrays. return_tuple : bool, default=False If True, always return a tuple even if there is only one array. Returns ------- ys : tuple of array-like or array-like Input data with unique values attached.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_unique.py",
    "ast_data": "FunctionDef name:attach_unique arguments arg arg Assign Call Call If BoolOp Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "UndefinedMetricWarning",
    "source_code": "class UndefinedMetricWarning(UserWarning):\n    pass",
    "docstring": "Warning used when the metric is invalid .. versionchanged:: 0.18 Moved from sklearn.base.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:UndefinedMetricWarning"
  },
  {
    "library": "tensorflow",
    "name": "BadExportError",
    "source_code": "class BadExportError(Exception):\n    pass",
    "docstring": "Exception for bad exports.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\api\\generator2\\extractor\\extractor.py",
    "ast_data": "ClassDef name:BadExportError"
  },
  {
    "library": "pytorch",
    "name": "update",
    "source_code": "def update(self, values: dict[str, Any], overwrite: bool=False) -> None:\n    if self._level == 0:\n        raise RuntimeError('Cannot update metrics outside of a MetricsContext')\n    existing = self._metrics.keys() & values.keys()\n    if existing and (not overwrite):\n        raise RuntimeError(f'Metric(s) {existing} have already been set in the current context')\n    self._metrics.update(values)",
    "docstring": "Set multiple metrics directly. This method does NOT increment. Raises if any metric has been assigned previously in the current context and overwrite is not set to True.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py",
    "ast_data": "FunctionDef name:update arg:self arg:values arg:overwrite arguments arg arg arg If Compare Raise Call Assign Call Call If BoolOp Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_WireProtocolInput",
    "source_code": "@dataclass\nclass _WireProtocolInput:\n    gm: torch.fx.GraphModule\n    example_inputs: Sequence[InputType]\n    inputs_to_check: Sequence[int]\n    graph_kwargs: _CompileFxKwargs\n    tracing_context: Optional[torch._guards.TracingContext]\n    config: dict[str, object]\n    virtualized: _VirtualizedSerializer\n    deterministic_guard_for_testing: Optional[torch.testing._internal.common_utils.DeterministicGuard]\n    logger_state: _LoggerState\n    lowering: _LoweringSerializer\n    fake_tensor_mode: _FakeTensorModeSerializer\n\n    def serialize(self) -> _WireProtocolPickledInput:\n        from torch.fx._graph_pickler import GraphPickler\n        return _WireProtocolPickledInput(GraphPickler.dumps(self))",
    "docstring": "For _SerializedFxCompile - encapsulates all the data being transferred (sent) from the parent to the child.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_WireProtocolInput FunctionDef name:serialize arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "establish_variables",
    "source_code": "def establish_variables(self, data, **kws):\n    self.data = data\n    any_strings = any([isinstance(v, str) for v in kws.values()])\n    if any_strings and data is None:\n        raise ValueError('Must pass `data` if using named variables.')\n    for var, val in kws.items():\n        if isinstance(val, str):\n            vector = data[val]\n        elif isinstance(val, list):\n            vector = np.asarray(val)\n        else:\n            vector = val\n        if vector is not None and vector.shape != (1,):\n            vector = np.squeeze(vector)\n        if np.ndim(vector) > 1:\n            err = 'regplot inputs must be 1d'\n            raise ValueError(err)\n        setattr(self, var, vector)",
    "docstring": "Extract variables from data or use directly.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:establish_variables arg:self arg:data arguments arg arg arg Assign Assign Call Call Call If BoolOp Compare Raise Call For Call If Call Assign If Call Assign Call Assign If BoolOp Compare Compare Assign Call If Compare Call Assign Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "@_docstring.interpd\ndef __init__(self, x, y, dx, dy, *, width=1.0, **kwargs):\n    super().__init__(**kwargs)\n    self.set_data(x, y, dx, dy, width)",
    "docstring": "Draws an arrow from (*x*, *y*) to (*x* + *dx*, *y* + *dy*). The width of the arrow is scaled by *width*. Parameters ---------- x : float x coordinate of the arrow tail. y : float y coordinate of the arrow tail. dx : float Arrow length in the x direction. dy : float Arrow length in the y direction. width : float, default: 1 Scale factor for the width of the arrow. With a default value of 1, the tail width is 0.2 and head width is 0.6. **kwargs Keyword arguments control the properties: %(Patch:kwdoc)s See Also -------- FancyArrow Patch that allows independent control of the head and tail properties.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:y arg:dx arg:dy arguments arg arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_localize_point",
    "source_code": "def _maybe_localize_point(ts: Timestamp | None, freq, tz, ambiguous, nonexistent) -> Timestamp | None:\n    if ts is not None and ts.tzinfo is None:\n        ambiguous = ambiguous if ambiguous != 'infer' else False\n        localize_args = {'ambiguous': ambiguous, 'nonexistent': nonexistent, 'tz': None}\n        if isinstance(freq, Tick) or freq is None:\n            localize_args['tz'] = tz\n        ts = ts.tz_localize(**localize_args)\n    return ts",
    "docstring": "Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize freq : Tick, DateOffset, or None tz : str, timezone object or None ambiguous: str, localization behavior for ambiguous times nonexistent: str, localization behavior for nonexistent times Returns ------- ts : Timestamp",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:_maybe_localize_point arg:ts arg:freq arg:tz arg:ambiguous arg:nonexistent arguments arg arg arg arg arg If BoolOp Compare Compare Assign Compare Assign If BoolOp Call Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_ticksize",
    "source_code": "def set_ticksize(self, ticksize):\n    self._ticksize = ticksize",
    "docstring": "Set length of the ticks in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:set_ticksize arg:self arg:ticksize arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "_root_scalar_newton_doc",
    "source_code": "def _root_scalar_newton_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function and its derivative. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. fprime : bool or callable, optional If is a boolean and is True, is assumed to return the value of derivative along with the objective function. can also be a callable returning the derivative of . In this case, it must accept the same arguments as . options: dict, optional Specifies any method-specific options not covered above.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_newton_doc arguments"
  },
  {
    "library": "tensorflow",
    "name": "add_resource",
    "source_code": "def add_resource(self, feature_column, resource_name, resource):\n    self._cols_to_resources_map[feature_column][resource_name] = resource\n    if self._layer is not None and isinstance(resource, trackable.Trackable):\n        if feature_column.name not in self._layer._resources:\n            self._layer._resources[feature_column.name] = data_structures.Mapping()\n        if resource_name not in self._layer._resources[feature_column.name]:\n            self._layer._resources[feature_column.name][resource_name] = resource",
    "docstring": "Creates a new resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this resource corresponds to. resource_name: Name of the resource. resource: The resource. Returns: The created resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:add_resource arg:self arg:feature_column arg:resource_name arg:resource arguments arg arg arg arg Assign If BoolOp Compare Call If Compare Assign Call If Compare Assign"
  },
  {
    "library": "scipy",
    "name": "r2cn",
    "source_code": "def r2cn(forward, x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None):\n    if plan is not None:\n        raise NotImplementedError('Passing a precomputed plan is not yet supported by scipy.fft functions')\n    tmp = _asfarray(x)\n    if not np.isrealobj(tmp):\n        raise TypeError('x must be a real sequence')\n    shape, axes = _init_nd_shape_and_axes(tmp, s, axes)\n    tmp, _ = _fix_shape(tmp, shape, axes)\n    norm = _normalization(norm, forward)\n    workers = _workers(workers)\n    if len(axes) == 0:\n        raise ValueError('at least 1 axis must be transformed')\n    return pfft.r2c(tmp, axes, forward, norm, None, workers)",
    "docstring": "Return multidimensional discrete Fourier transform of real input",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\basic.py",
    "ast_data": "FunctionDef name:r2cn arg:forward arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg arg If Compare Raise Call Assign Call If Call Raise Call Assign Call Assign Call Assign Call Assign Call If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_tx_resource_slug_for_name",
    "source_code": "def _tx_resource_slug_for_name(name):\n    if name != 'core':\n        name = f'contrib-{name}'\n    return name",
    "docstring": "Return the Transifex resource slug for the given name.",
    "type": "function",
    "file_path": "django\\scripts\\manage_translations.py",
    "ast_data": "FunctionDef name:_tx_resource_slug_for_name arg:name arguments arg If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sample",
    "source_code": "@torch.no_grad()\ndef sample(self, sample_shape=torch.Size()):\n    shape = self._extended_shape(sample_shape)\n    x = torch.empty(shape, dtype=self._loc.dtype, device=self.loc.device)\n    return _rejection_sample(self._loc, self._concentration, self._proposal_r, x).to(self.loc.dtype)",
    "docstring": "The sampling algorithm for the von Mises distribution is based on the following paper: D.J. Best and N.I. Fisher, \"Efficient simulation of the von Mises distribution.\" Applied Statistics (1979): 152-157. Sampling is always done in double precision internally to avoid a hang in _rejection_sample() for small values of the concentration, which starts to happen for single precision around 1e-4 (see issue #88443).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\von_mises.py",
    "ast_data": "FunctionDef name:sample arg:self arg:sample_shape arguments arg arg Call Assign Call Assign Call Return return:yes Call Call Call"
  },
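A usage sketch: construct the distribution and draw samples; per the docstring, the rejection sampling happens internally in double precision regardless of the dtype used here.

```python
import torch
from torch.distributions import VonMises

dist = VonMises(loc=torch.tensor(0.0), concentration=torch.tensor(4.0))
samples = dist.sample((1000,))

# Samples are angles on the circle, concentrated around loc for large
# concentration values.
print(samples.min(), samples.max())
```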
  {
    "library": "pytorch",
    "name": "from_fx_tracer_produced_raw_meta",
    "source_code": "@classmethod\ndef from_fx_tracer_produced_raw_meta(cls, raw_meta: _FX_TRACER_NN_MODULE_META_TYPE) -> _ModuleMeta:\n    module_name, module_class = raw_meta\n    return _ModuleMeta(module_name, module_class, raw_meta)",
    "docstring": "Create a module meta from raw meta produced by FX symbolic tracer.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:from_fx_tracer_produced_raw_meta arg:cls arg:raw_meta arguments arg arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_old_tf_random_stateless_normal",
    "source_code": "def _old_tf_random_stateless_normal(shape, seed, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None, layout=None):\n    with ops.name_scope(name, 'stateless_random_normal', [shape, seed, mean, stddev]) as name:\n        seed = ops.convert_to_tensor(seed, dtype=dtypes.int32, name='seed')\n        shape = shape_util.shape_tensor(shape)\n        mean = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        rnd = api.call_with_layout(gen_stateless_random_ops.stateless_random_normal, layout, shape, seed, dtype)\n        result = math_ops.add(rnd * stddev, mean, name=name)\n        shape_util.maybe_set_static_shape(result, shape)\n        return result",
    "docstring": "DTensor stateless normal implementation that takes an layout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_random.py",
    "ast_data": "FunctionDef name:_old_tf_random_stateless_normal arg:shape arg:seed arg:mean arg:stddev arg:dtype arg:name arg:layout arguments arg arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    name = capi.get_field_name(self.ptr)\n    return force_str(name, encoding=self._feat.encoding, strings_only=True)",
    "docstring": "Return the name of this Field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "has_backend_feature",
    "source_code": "def has_backend_feature(device: Union[torch.device, str, None], feature: BackendFeature) -> bool:\n    assert isinstance(feature, BackendFeature)\n    return feature in get_backend_features(device)",
    "docstring": "See also V.graph.has_feature",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:has_backend_feature arg:device arg:feature arguments arg arg Call Return return:yes Compare Call"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start():\n    pass",
    "docstring": "Start collecting coverage.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\covercp.py",
    "ast_data": "FunctionDef name:start arguments"
  },
  {
    "library": "pygame",
    "name": "write_sys_ex",
    "source_code": "def write_sys_ex(self, when, msg):\n    _check_init()\n    self._check_open()\n    self._output.WriteSysEx(when, msg)",
    "docstring": "writes a timestamped system-exclusive midi message. Output.write_sys_ex(when, msg) msg - can be a *list* or a *string* when - a timestamp in milliseconds example: (assuming o is an onput MIDI stream) o.write_sys_ex(0,'\\xF0\\x7D\\x10\\x11\\x12\\x13\\xF7') is equivalent to o.write_sys_ex(pygame.midi.time(), [0xF0,0x7D,0x10,0x11,0x12,0x13,0xF7])",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:write_sys_ex arg:self arg:when arg:msg arguments arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "these_before_those_pass_constraint",
    "source_code": "def these_before_those_pass_constraint(these: Callable, those: Callable):\n\n    def depends_on(a: Callable, b: Callable):\n        return unwrap(a) != those or unwrap(b) != these\n    return depends_on",
    "docstring": "Defines a partial order ('depends on' function) where must occur before . Where the inputs are 'unwrapped' before comparison. For example, the following pass list and constraint list would be invalid. Args: these (Callable): pass which should occur first those (Callable): pass which should occur later Returns: depends_on (Callable[[Object, Object], bool]",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\pass_manager.py",
    "ast_data": "FunctionDef name:these_before_those_pass_constraint arg:these arg:those arguments arg arg FunctionDef name:depends_on arg:a arg:b arguments arg arg Return return:yes BoolOp Compare Call Compare Call Return return:yes"
  },
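A minimal sketch of how the returned predicate behaves. `shrink_pass` and `dce_pass` are hypothetical names for illustration, not part of the library; the constraint evaluates to False exactly when the `those` pass would be ordered before the `these` pass.

```python
from torch.fx.passes.pass_manager import these_before_those_pass_constraint

# Hypothetical passes, used only to exercise the predicate.
def shrink_pass(graph_module):
    return graph_module

def dce_pass(graph_module):
    return graph_module

# Require shrink_pass to run before dce_pass.
constraint = these_before_those_pass_constraint(shrink_pass, dce_pass)

assert constraint(shrink_pass, dce_pass)      # valid ordering
assert not constraint(dce_pass, shrink_pass)  # violates the partial order
```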
  {
    "library": "scipy",
    "name": "dblint",
    "source_code": "def dblint(xa, xb, ya, yb, tck):\n    tx, ty, c, kx, ky = tck\n    return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)",
    "docstring": "Evaluate the integral of a spline over area [xa,xb] x [ya,yb]. Parameters ---------- xa, xb : float The end-points of the x integration interval. ya, yb : float The end-points of the y integration interval. tck : list [tx, ty, c, kx, ky] A sequence of length 5 returned by bisplrep containing the knot locations tx, ty, the coefficients c, and the degrees kx, ky of the spline. Returns ------- integ : float The value of the resulting integral.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack_impl.py",
    "ast_data": "FunctionDef name:dblint arg:xa arg:xb arg:ya arg:yb arg:tck arguments arg arg arg arg arg Assign Return return:yes Call"
  },
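A small usage sketch, assuming the public `scipy.interpolate.dblint` re-export: fit a bivariate spline to f(x, y) = x*y with `bisplrep`, then integrate it over the unit square, where the exact integral is 1/4 (the grid size is arbitrary).

```python
import numpy as np
from scipy.interpolate import bisplrep, dblint

# Sample f(x, y) = x * y on a 10x10 grid and fit an interpolating spline.
x, y = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
tck = bisplrep(x.ravel(), y.ravel(), (x * y).ravel(), s=0)

# Integrate the spline over [0, 1] x [0, 1]; the exact value is 0.25.
print(dblint(0, 1, 0, 1, tck))
```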
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, *args, **kwargs):\n    kwargs['discrete'] = True\n    return super().rvs(*args, **kwargs)",
    "docstring": "Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). size : int or tuple of ints, optional Defining number of random variates (Default is 1). Note that has to be given as keyword, not as positional argument. random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `random_staterandom_statesize`.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:rvs arg:self arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_bound3d",
    "source_code": "def _set_bound3d(self, get_bound, set_lim, axis_inverted, lower=None, upper=None, view_margin=None):\n    if upper is None and np.iterable(lower):\n        lower, upper = lower\n    old_lower, old_upper = get_bound()\n    if lower is None:\n        lower = old_lower\n    if upper is None:\n        upper = old_upper\n    set_lim(sorted((lower, upper), reverse=bool(axis_inverted())), auto=None, view_margin=view_margin)",
    "docstring": "Set 3D axis bounds.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:_set_bound3d arg:self arg:get_bound arg:set_lim arg:axis_inverted arg:lower arg:upper arg:view_margin arguments arg arg arg arg arg arg arg If BoolOp Compare Call Assign Assign Call If Compare Assign If Compare Assign Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "kernel_",
    "source_code": "@property\ndef kernel_(self):\n    if self.n_classes_ == 2:\n        return self.base_estimator_.kernel_\n    else:\n        return CompoundKernel([estimator.kernel_ for estimator in self.base_estimator_.estimators_])",
    "docstring": "Return the kernel of the base estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:kernel_ arg:self arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RichLine",
    "source_code": "class RichLine:\n\n    def __init__(self, text='', font_attr=None):\n        self.text = text\n        if font_attr:\n            self.font_attr_segs = [(0, len(text), font_attr)]\n        else:\n            self.font_attr_segs = []\n\n    def __add__(self, other):\n        ret = RichLine()\n        if isinstance(other, str):\n            ret.text = self.text + other\n            ret.font_attr_segs = self.font_attr_segs[:]\n            return ret\n        elif isinstance(other, RichLine):\n            ret.text = self.text + other.text\n            ret.font_attr_segs = self.font_attr_segs[:]\n            old_len = len(self.text)\n            for start, end, font_attr in other.font_attr_segs:\n                ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))\n            return ret\n        else:\n            raise TypeError('%r cannot be concatenated with a RichLine' % other)\n\n    def __len__(self):\n        return len(self.text)",
    "docstring": "Rich single-line text. Attributes: text: A plain string, the raw text represented by this object. Should not contain newlines. font_attr_segs: A list of (start, end, font attribute) triples, representing richness information applied to substrings of text.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "ClassDef name:RichLine FunctionDef name:__init__ arg:self arg:text arg:font_attr arguments arg arg arg Assign If Assign Call Assign FunctionDef name:__add__ arg:self arg:other arguments arg arg Assign Call If Call Assign Assign Return return:yes If Call Assign Assign Assign Call For Call Return return:yes Raise Call FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
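To illustrate the `__add__` semantics (note the module is internal to the TF debugger CLI): font-attribute segments from the right operand are shifted by the left operand's length, so spans stay aligned with the concatenated text.

```python
from tensorflow.python.debug.cli.debugger_cli_common import RichLine

line = RichLine("Error: ", font_attr="red") + "tensor has NaN values"
print(line.text)            # Error: tensor has NaN values
print(line.font_attr_segs)  # [(0, 7, 'red')] -- span covers 'Error: '
print(len(line))            # 28
```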
  {
    "library": "tensorflow",
    "name": "initialized",
    "source_code": "def initialized():\n    return _test_main_called",
    "docstring": "Returns whether the module is initialized.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_lib.py",
    "ast_data": "FunctionDef name:initialized arguments Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "acquire_lock",
    "source_code": "def acquire_lock(self):\n    self.locked = True\n    self.locks.setdefault(self.id, threading.RLock()).acquire()\n    if self.debug:\n        cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')",
    "docstring": "Acquire an exclusive lock on the currently-loaded session data.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:acquire_lock arg:self arguments arg Assign Call Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "match_filenames_once",
    "source_code": "@tf_export('io.match_filenames_once', v1=['io.match_filenames_once', 'train.match_filenames_once'])\n@deprecation.deprecated_endpoints('train.match_filenames_once')\ndef match_filenames_once(pattern, name=None):\n    with ops.name_scope(name, 'matching_filenames', [pattern]) as name:\n        return variable_v1.VariableV1(name=name, initial_value=io_ops.matching_files(pattern), trainable=False, validate_shape=False, collections=[ops.GraphKeys.LOCAL_VARIABLES])",
    "docstring": "Save the list of files matching pattern, so it is only computed once. NOTE: The order of the files returned is deterministic. Args: pattern: A file pattern (glob), or 1D tensor of file patterns. name: A name for the operations (optional). Returns: A variable that is initialized to the list of files matching the pattern(s).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:match_filenames_once arg:pattern arg:name arguments arg arg With Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "entropy",
    "source_code": "def entropy(self, name='entropy'):\n    with self._name_scope(name):\n        return self._entropy()",
    "docstring": "Shannon entropy in nats.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:entropy arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_local_timestamps",
    "source_code": "def _local_timestamps(self) -> npt.NDArray[np.int64]:\n    if self.tz is None or timezones.is_utc(self.tz):\n        return self.asi8\n    return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)",
    "docstring": "Convert to an i8 (unix-like nanosecond timestamp) representation while keeping the local timezone and not using UTC. This is used to calculate time-of-day information as if the timestamps were timezone-naive.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py",
    "ast_data": "FunctionDef name:_local_timestamps arg:self arguments arg If BoolOp Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "transform",
    "source_code": "def transform(self, coord_trans, clone=False):\n    if clone:\n        klone = self.clone()\n        klone.transform(coord_trans)\n        return klone\n    if isinstance(coord_trans, CoordTransform):\n        capi.geom_transform(self.ptr, coord_trans.ptr)\n    elif isinstance(coord_trans, SpatialReference):\n        capi.geom_transform_to(self.ptr, coord_trans.ptr)\n    elif isinstance(coord_trans, (int, str)):\n        sr = SpatialReference(coord_trans)\n        capi.geom_transform_to(self.ptr, sr.ptr)\n    else:\n        raise TypeError('Transform only accepts CoordTransform, SpatialReference, string, and integer objects.')",
    "docstring": "Transform this geometry to a different spatial reference system. May take a CoordTransform object, a SpatialReference object, string WKT or PROJ, and/or an integer SRID. By default, return nothing and transform the geometry in-place. However, if the keyword is set, return a transformed clone of this geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:transform arg:self arg:coord_trans arg:clone arguments arg arg arg If Assign Call Call Return return:yes If Call Call If Call Call If Call Assign Call Call Raise Call"
  },
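A short usage sketch (requires GDAL; the coordinates are arbitrary): passing an integer SRID transforms in place, while `clone=True` leaves the original untouched.

```python
from django.contrib.gis.gdal import OGRGeometry

pnt = OGRGeometry("POINT(-104.609 38.255)", srs=4326)

# clone=True returns a transformed copy; the original keeps SRID 4326.
mercator = pnt.transform(3857, clone=True)

# Without clone, the geometry itself is reprojected in place.
pnt.transform(3857)
```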
  {
    "library": "sphinx",
    "name": "AutoNumbering",
    "source_code": "class AutoNumbering(SphinxTransform):\n    default_priority = 210\n\n    def apply(self, **kwargs: Any) -> None:\n        domain: StandardDomain = self.env.domains.standard_domain\n        for node in self.document.findall(nodes.Element):\n            if domain.is_enumerable_node(node) and domain.get_numfig_title(node) is not None and (node['ids'] == []):\n                self.document.note_implicit_target(node)",
    "docstring": "Register IDs of tables, figures and literal_blocks to assign numbers.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:AutoNumbering Assign FunctionDef name:apply arg:self arguments arg arg For Call If BoolOp Call Compare Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "AutoCastDistributedVariable",
    "source_code": "class AutoCastDistributedVariable(AutoCastVariable, variable.__class__):\n\n    def __repr__(self):\n        return '<AutoCastDistributedVariable dtype={v.dtype.name} dtype_to_cast_to={v._cast_dtype.name} inner_variable={v._variable}>'.format(v=self)",
    "docstring": "An AutoCastVariable that also subclasses from variable.__class__. variable.__class__ is either a DistributedVariable or an AggregatingVariable.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "ClassDef name:AutoCastDistributedVariable FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    _accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), self.solver)\n    xp, _ = get_namespace(X, y, sample_weight)\n    X, y = validate_data(self, X, y, accept_sparse=_accept_sparse, dtype=[xp.float64, xp.float32], force_writeable=True, multi_output=True, y_numeric=True)\n    return super().fit(X, y, sample_weight=sample_weight)",
    "docstring": "Fit Ridge regression model. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Training data. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Target values. sample_weight : float or ndarray of shape (n_samples,), default=None Individual weights for each sample. If given a float, every sample will have the same weight. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RandomShuffleQueue",
    "source_code": "@tf_export('queue.RandomShuffleQueue', v1=['queue.RandomShuffleQueue', 'io.RandomShuffleQueue', 'RandomShuffleQueue'])\n@deprecation.deprecated_endpoints(['io.RandomShuffleQueue', 'RandomShuffleQueue'])\nclass RandomShuffleQueue(QueueBase):\n\n    def __init__(self, capacity, min_after_dequeue, dtypes, shapes=None, names=None, seed=None, shared_name=None, name='random_shuffle_queue'):\n        dtypes = _as_type_list(dtypes)\n        shapes = _as_shape_list(shapes, dtypes)\n        names = _as_name_list(names, dtypes)\n        seed1, seed2 = random_seed.get_seed(seed)\n        if seed1 is None and seed2 is None:\n            seed1, seed2 = (0, 0)\n        elif seed is None and shared_name is not None:\n            string = (str(seed1) + shared_name).encode('utf-8')\n            seed2 = int(hashlib.md5(string).hexdigest()[:8], 16) & 2147483647\n        queue_ref = gen_data_flow_ops.random_shuffle_queue_v2(component_types=dtypes, shapes=shapes, capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed1, seed2=seed2, shared_name=_shared_name(shared_name), name=name)\n        super(RandomShuffleQueue, self).__init__(dtypes, shapes, names, queue_ref)",
    "docstring": "A queue implementation that dequeues elements in a random order. See for a description of the methods on this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "ClassDef name:RandomShuffleQueue FunctionDef name:__init__ arg:self arg:capacity arg:min_after_dequeue arg:dtypes arg:shapes arg:names arg:seed arg:shared_name arg:name arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Call Call Assign Call Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "dtype_to_arrow_c_fmt",
    "source_code": "def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:\n    if isinstance(dtype, CategoricalDtype):\n        return ArrowCTypes.INT64\n    elif dtype == np.dtype('O'):\n        return ArrowCTypes.STRING\n    elif isinstance(dtype, ArrowDtype):\n        import pyarrow as pa\n        pa_type = dtype.pyarrow_dtype\n        if pa.types.is_decimal(pa_type):\n            return f'd:{pa_type.precision},{pa_type.scale}'\n        elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:\n            return f'ts{pa_type.unit[0]}:{pa_type.tz}'\n        format_str = PYARROW_CTYPES.get(str(pa_type), None)\n        if format_str is not None:\n            return format_str\n    format_str = getattr(ArrowCTypes, dtype.name.upper(), None)\n    if format_str is not None:\n        return format_str\n    if isinstance(dtype, pd.StringDtype):\n        return ArrowCTypes.STRING\n    elif lib.is_np_dtype(dtype, 'M'):\n        resolution = np.datetime_data(dtype)[0][0]\n        return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz='')\n    elif isinstance(dtype, DatetimeTZDtype):\n        return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)\n    elif isinstance(dtype, pd.BooleanDtype):\n        return ArrowCTypes.BOOL\n    raise NotImplementedError(f'Conversion of {dtype} to Arrow C format string is not implemented.')",
    "docstring": "Represent pandas as a format string in Apache Arrow C notation. Parameters ---------- dtype : np.dtype Datatype of pandas DataFrame to represent. Returns ------- str Format string in Apache Arrow C notation of the given .",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\utils.py",
    "ast_data": "FunctionDef name:dtype_to_arrow_c_fmt arg:dtype arguments arg If Call Return return:yes If Compare Call Return return:yes If Call Assign If Call Return return:yes If BoolOp Call Compare Return return:yes Assign Call Call If Compare Return return:yes Assign Call Call If Compare Return return:yes If Call Return return:yes If Call Assign Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Raise Call"
  },
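A sketch of the mapping on a few common dtypes. The helper lives in a private pandas module, and the expected outputs follow the Arrow C data interface spec ('l' for int64, 'u' for utf-8 strings, 'tsn:' for tz-naive nanosecond timestamps).

```python
import numpy as np
from pandas.core.interchange.utils import dtype_to_arrow_c_fmt

print(dtype_to_arrow_c_fmt(np.dtype("int64")))           # 'l'
print(dtype_to_arrow_c_fmt(np.dtype("O")))               # 'u'  (string)
print(dtype_to_arrow_c_fmt(np.dtype("datetime64[ns]")))  # 'tsn:'
```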
  {
    "library": "tensorflow",
    "name": "isroutine",
    "source_code": "def isroutine(object):\n    return _inspect.isroutine(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isroutine.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:isroutine arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "generate_numba_agg_func",
    "source_code": "@functools.cache\ndef generate_numba_agg_func(func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]:\n    numba_func = jit_user_function(func)\n    if TYPE_CHECKING:\n        import numba\n    else:\n        numba = import_optional_dependency('numba')\n\n    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n    def group_agg(values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any) -> np.ndarray:\n        assert len(begin) == len(end)\n        num_groups = len(begin)\n        result = np.empty((num_groups, num_columns))\n        for i in numba.prange(num_groups):\n            group_index = index[begin[i]:end[i]]\n            for j in numba.prange(num_columns):\n                group = values[begin[i]:end[i], j]\n                result[i, j] = numba_func(group, group_index, *args)\n        return result\n    return group_agg",
    "docstring": "Generate a numba jitted agg function specified by values from engine_kwargs. 1. jit the user's function 2. Return a groupby agg function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's function _AND_ the groupby evaluation loop. Parameters ---------- func : function function to be applied to each group and will be JITed nopython : bool nopython to be passed into numba.jit nogil : bool nogil to be passed into numba.jit parallel : bool parallel to be passed into numba.jit Returns ------- Numba function",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\groupby\\numba_.py",
    "ast_data": "FunctionDef name:generate_numba_agg_func arg:func arg:nopython arg:nogil arg:parallel arguments arg arg arg arg Assign Call If Assign Call FunctionDef name:group_agg arg:values arg:index arg:begin arg:end arg:num_columns arguments arg arg arg arg arg arg Compare Call Call Assign Call Assign Call For Call Assign For Call Assign Assign Call Return return:yes Call Return return:yes"
  },
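This factory backs the user-facing `engine="numba"` path of groupby aggregation; a sketch of that public entry point (requires numba installed), where the user function receives the group's values and index as ndarrays:

```python
import pandas as pd

# With engine="numba", the aggregation function must take (values, index).
def mean_plus_one(values, index):
    return values.mean() + 1.0

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1.0, 3.0, 5.0]})
print(df.groupby("key")["x"].agg(mean_plus_one, engine="numba"))
# a -> 3.0, b -> 6.0
```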
  {
    "library": "cryptography",
    "name": "public_numbers",
    "source_code": "@abc.abstractmethod\ndef public_numbers(self) -> DHPublicNumbers:\n    pass",
    "docstring": "Returns a DHPublicNumbers.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:public_numbers arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "PyrDown",
    "source_code": "class PyrDown(Module):\n\n    def __init__(self, border_type: str='reflect', align_corners: bool=False, factor: float=2.0) -> None:\n        super().__init__()\n        self.border_type: str = border_type\n        self.align_corners: bool = align_corners\n        self.factor: float = factor\n\n    def forward(self, input: Tensor) -> Tensor:\n        return pyrdown(input, self.border_type, self.align_corners, self.factor)",
    "docstring": "Blur a tensor and downsamples it. Args: border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H / 2, W / 2)` Examples: >>> input = torch.rand(1, 2, 4, 4) >>> output = PyrDown()(input) # 1x2x2x2",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py",
    "ast_data": "ClassDef name:PyrDown FunctionDef name:__init__ arg:self arg:border_type arg:align_corners arg:factor arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "Tags",
    "source_code": "class Tags:\n    admin = 'admin'\n    async_support = 'async_support'\n    caches = 'caches'\n    commands = 'commands'\n    compatibility = 'compatibility'\n    database = 'database'\n    files = 'files'\n    models = 'models'\n    security = 'security'\n    signals = 'signals'\n    sites = 'sites'\n    staticfiles = 'staticfiles'\n    templates = 'templates'\n    translation = 'translation'\n    urls = 'urls'",
    "docstring": "Built-in tags for internal checks.",
    "type": "class",
    "file_path": "django\\django\\core\\checks\\registry.py",
    "ast_data": "ClassDef name:Tags Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_default_qconfig_propagation_list",
    "source_code": "def get_default_qconfig_propagation_list() -> set[Callable]:\n    QCONFIG_PROPAGATE_MODULE_CLASS_LIST = set(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS.keys()) | set(DEFAULT_QAT_MODULE_MAPPINGS.keys()) | set(DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS.keys()) | _INCLUDE_QCONFIG_PROPAGATE_LIST\n    return copy.deepcopy(QCONFIG_PROPAGATE_MODULE_CLASS_LIST)",
    "docstring": "Get the default list of module types that we'll attach qconfig attribute to in prepare",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_default_qconfig_propagation_list arguments Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, restored_tensors, restored_shapes):\n    tensor, = restored_tensors\n\n    @def_function.function\n    def _restore(t):\n        with ops.device(self._dvariable.device):\n            return api.copy_to_mesh(t, self._original_layout)\n    if self._original_layout.mesh.device_type().upper() != 'CPU':\n        tensor = _restore(tensor)\n    return self._dvariable.assign(math_ops.cast(tensor, dtype=self._dvariable.dtype) if self._dvariable.save_as_bf16 else tensor)",
    "docstring": "Restores the same value into all variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_variable.py",
    "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign FunctionDef name:_restore arg:t arguments arg With Call Return return:yes Call If Compare Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "main",
    "source_code": "def main():\n    parser = argparse.ArgumentParser(description='Cherry picking automation.')\n    parser.add_argument('--version', help='<new_major_ver>.<new_minor_ver>.<new_patch_ver>', default='')\n    parser.add_argument('--nightly', help='disable the service provisioning step', action='store_true')\n    args = parser.parse_args()\n    check_all_files()\n    old_version = get_current_semver_version()\n    if args.nightly:\n        if args.version:\n            new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION)\n            new_version.set_identifier_string('-dev' + time.strftime('%Y%m%d'))\n        else:\n            new_version = Version(old_version.major, str(old_version.minor), old_version.patch, '-dev' + time.strftime('%Y%m%d'), NIGHTLY_VERSION)\n    else:\n        new_version = Version.parse_from_string(args.version, SNAPSHOT_VERSION)\n    update_tf_version_bzl(old_version, new_version)\n    update_bazelrc(old_version, new_version)\n    update_readme(old_version, new_version)\n    print('Major: %s -> %s' % (old_version.major, new_version.major))\n    print('Minor: %s -> %s' % (old_version.minor, new_version.minor))\n    print('Patch: %s -> %s\\n' % (old_version.patch, new_version.patch))\n    check_for_old_version(old_version, new_version)",
    "docstring": "This script updates all instances of version in the tensorflow directory. Requirements: version: The version tag OR nightly: Create a nightly tag with current date Raises: RuntimeError: If the script is not being run from tf source dir",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:main arguments Assign Call Call Call Assign Call Call Assign Call If If Assign Call Call Call Assign Call Call Call Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_bfloat16",
    "source_code": "@tf_export(v1=['to_bfloat16'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\n@deprecation.deprecated(date=None, instructions='Use `tf.cast` instead.')\ndef to_bfloat16(x, name='ToBFloat16'):\n    return cast(x, dtypes.bfloat16, name=name)",
    "docstring": "Casts a tensor to type . Args: x: A or or . name: A name for the operation (optional). Returns: A or or with same shape as with type . Raises: TypeError: If cannot be cast to the . @compatibility(TF2) This name was deprecated and removed in TF2, but has an exact replacement . There are no further issues with eager execution or tf.function. Before: >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32)) After: >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16) @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:to_bfloat16 arg:x arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "_transform_input_by_shape",
    "source_code": "def _transform_input_by_shape(input: Tensor, reference_shape: Tensor, match_channel: bool=True) -> Tensor:\n    B = reference_shape[-4] if len(reference_shape) >= 4 else None\n    C = reference_shape[-3] if len(reference_shape) >= 3 else None\n    if len(input.shape) == 2:\n        input = input.unsqueeze(0)\n    if len(input.shape) == 3:\n        input = input.unsqueeze(1) if B == input.shape[-3] else input.unsqueeze(0)\n    if match_channel and C:\n        if not input.shape[-3] == C:\n            raise ValueError('The C dimension of tensor did not match with the reference tensor.')\n    elif match_channel and C is None:\n        raise ValueError('The reference tensor do not have a C dimension!')\n    return input",
    "docstring": "Reshape an input tensor to have the same dimensions as the reference_shape. Arguments: input: tensor to be transformed reference_shape: shape used as reference match_channel: if True, C_{src} == C_{ref}. otherwise, no constrain. C =1 by default",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:_transform_input_by_shape arg:input arg:reference_shape arg:match_channel arguments arg arg arg Assign Compare Call Assign Compare Call If Compare Call Assign Call If Compare Call Assign Compare Call Call If BoolOp If Compare Raise Call If BoolOp Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_num_pos_args",
    "source_code": "def _get_num_pos_args(f: Callable) -> int:\n    return len(getfullargspec(f).args)",
    "docstring": "Get number of positional args for a function Example:: >> def f(self, key1=3, key2=3): pass >> _get_num_pos_args(f) 3",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_get_num_pos_args arg:f arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_horizontal",
    "source_code": "def set_horizontal(self, h):\n    self._horizontal = h",
    "docstring": "Parameters ---------- h : list of :mod: sizes for horizontal division",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:set_horizontal arg:self arg:h arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "set_github_output",
    "source_code": "def set_github_output(key: str, value: str) -> None:\n    if not GITHUB_OUTPUT:\n        log.warning('No env var found for GITHUB_OUTPUT, you must be running this code locally. Falling back to the deprecated print method.')\n        print(f'::set-output name={key}::{value}')\n        return\n    with open(GITHUB_OUTPUT, 'a') as f:\n        log.info(f\"Setting output: {key}='{value}'\")\n        f.write(f'{key}={value}\\n')",
    "docstring": "Defines outputs of the github action that invokes this script",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:set_github_output arg:key arg:value arguments arg arg If Call Call Return return:no With Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_is_label_reference",
    "source_code": "@final\ndef _is_label_reference(self, key: Level, axis: Axis=0) -> bool:\n    axis_int = self._get_axis_number(axis)\n    other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int)\n    return key is not None and is_hashable(key) and any((key in self.axes[ax] for ax in other_axes))",
    "docstring": "Test whether a key is a label reference for a given axis. To be considered a label reference, must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_is_label_reference arg:self arg:key arg:axis arguments arg arg arg Assign Call Assign Call Compare Return return:yes BoolOp Compare Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "is_full",
    "source_code": "@property\ndef is_full(self) -> bool:\n    if len(self) == 0:\n        return True\n    if not self.is_monotonic_increasing:\n        raise ValueError('Index is not monotonic')\n    values = self.asi8\n    return bool((values[1:] - values[:-1] < 2).all())",
    "docstring": "Returns True if this PeriodIndex is range-like in that all Periods between start and end are present, in order.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\period.py",
    "ast_data": "FunctionDef name:is_full arg:self arguments arg If Compare Call Return return:yes If Raise Call Assign Return return:yes Call Call Compare"
  },
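A quick illustration of the property: an index built from a contiguous range is full, while one with a gap is not.

```python
import pandas as pd

print(pd.period_range("2024-01", periods=3, freq="M").is_full)   # True
print(pd.PeriodIndex(["2024-01", "2024-03"], freq="M").is_full)  # False, Feb missing
```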
  {
    "library": "scipy",
    "name": "medfilt",
    "source_code": "def medfilt(volume, kernel_size=None):\n    xp = array_namespace(volume)\n    volume = xp.asarray(volume)\n    if volume.ndim == 0:\n        volume = xpx.atleast_nd(volume, ndim=1, xp=xp)\n    if not (xp.isdtype(volume.dtype, 'integral') or volume.dtype in [xp.float32, xp.float64]):\n        raise ValueError(f'dtype={volume.dtype} is not supported by medfilt')\n    if kernel_size is None:\n        kernel_size = [3] * volume.ndim\n    kernel_size = xp.asarray(kernel_size)\n    if kernel_size.shape == ():\n        kernel_size = xp.repeat(kernel_size, volume.ndim)\n    for k in range(volume.ndim):\n        if kernel_size[k] % 2 != 1:\n            raise ValueError('Each element of kernel_size should be odd.')\n    if any((k > s for k, s in zip(kernel_size, volume.shape))):\n        warnings.warn('kernel_size exceeds volume extent: the volume will be zero-padded.', stacklevel=2)\n    size = math.prod(kernel_size)\n    result = ndimage.rank_filter(volume, size // 2, size=kernel_size, mode='constant')\n    return result",
    "docstring": "Perform a median filter on an N-dimensional array. Apply a median filter to the input array using a local window-size given by . The array will automatically be zero-padded. Parameters ---------- volume : array_like An N-dimensional input array. kernel_size : array_like, optional A scalar or an N-length list giving the size of the median filter window in each dimension. Elements of should be odd. If is a scalar, then this scalar is used as the size in each dimension. Default size is 3 for each dimension. Returns ------- out : ndarray An array the same size as input containing the median filtered result. Warns ----- UserWarning If array size is smaller than kernel size along any dimension See Also -------- scipy.ndimage.median_filter scipy.signal.medfilt2d Notes ----- The more general function has a more efficient implementation of a median filter and therefore runs much faster. For 2-dimensional images with `scipy.signal.medfilt2d` may be faster.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:medfilt arg:volume arg:kernel_size arguments arg arg Assign Call Assign Call If Compare Assign Call If BoolOp Call Compare Raise Call If Compare Assign Assign Call If Compare Assign Call For Call If Compare Raise Call If Call Compare Call Call Assign Call Assign Call Return return:yes"
  },
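A one-dimensional example showing the zero-padded behaviour at the boundaries:

```python
import numpy as np
from scipy.signal import medfilt

# Window of 3 with implicit zero-padding at the edges.
x = np.array([1.0, 9.0, 2.0, 3.0, 8.0])
print(medfilt(x, kernel_size=3))  # [1. 2. 3. 3. 3.]
```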
  {
    "library": "pytorch",
    "name": "is_pinned",
    "source_code": "def is_pinned(self, device: Union[str, torch.device]='cuda'):\n    _warn_typed_storage_removal()\n    return self._untyped_storage.is_pinned(device)",
    "docstring": "Determine whether the CPU TypedStorage is already pinned on device. Args: device (str or torch.device): The device to pin memory on (default: ``). This argument is discouraged and subject to deprecated. Returns: A boolean variable.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:is_pinned arg:self arg:device arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_softmax",
    "source_code": "def inplace_softmax(X):\n    tmp = X - X.max(axis=1)[:, np.newaxis]\n    np.exp(tmp, out=X)\n    X /= X.sum(axis=1)[:, np.newaxis]",
    "docstring": "Compute the K-way softmax function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_softmax arg:X arguments arg Assign Call Call Call"
  },
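The max-subtraction is the standard overflow guard; a sketch (note `_base` is a private sklearn module) showing that even extreme logits still produce rows summing to one:

```python
import numpy as np
from sklearn.neural_network._base import inplace_softmax

X = np.array([[1.0, 2.0, 3.0],
              [1000.0, 1000.0, 1000.0]])  # would overflow a naive exp()
inplace_softmax(X)
print(X.sum(axis=1))  # [1. 1.]
```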
  {
    "library": "pandas",
    "name": "_get_default_annual_spacing",
    "source_code": "def _get_default_annual_spacing(nyears) -> tuple[int, int]:\n    if nyears < 11:\n        min_spacing, maj_spacing = (1, 1)\n    elif nyears < 20:\n        min_spacing, maj_spacing = (1, 2)\n    elif nyears < 50:\n        min_spacing, maj_spacing = (1, 5)\n    elif nyears < 100:\n        min_spacing, maj_spacing = (5, 10)\n    elif nyears < 200:\n        min_spacing, maj_spacing = (5, 25)\n    elif nyears < 600:\n        min_spacing, maj_spacing = (10, 50)\n    else:\n        factor = nyears // 1000 + 1\n        min_spacing, maj_spacing = (factor * 20, factor * 100)\n    return (min_spacing, maj_spacing)",
    "docstring": "Returns a default spacing between consecutive ticks for annual data.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:_get_default_annual_spacing arg:nyears arguments arg If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "LogCoshError",
    "source_code": "class LogCoshError(MeanMetricWrapper):\n\n    def __init__(self, name='logcosh', dtype=None):\n        super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)",
    "docstring": "Computes the logarithm of the hyperbolic cosine of the prediction error. , where x is the error (y_pred - y_true) Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.LogCoshError() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]]) >>> m.result().numpy() 0.10844523 >>> m.reset_state() >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]], ... sample_weight=[1, 0]) >>> m.result().numpy() 0.21689045 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:LogCoshError FunctionDef name:__init__ arg:self arg:name arg:dtype arguments arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_nontrivial_match",
    "source_code": "def assert_nontrivial_match(self):\n    for trackable_object in util.list_objects(self._object_graph_view, self._options.experimental_skip_slot_variables):\n        self._checkpoint.all_python_objects.add(trackable_object)\n    if len(self._checkpoint.object_by_proto_id) <= 1:\n        unused_python_objects = object_identity.ObjectIdentitySet(_objects_with_attributes(self._checkpoint.all_python_objects)) - object_identity.ObjectIdentitySet(self._checkpoint.object_by_proto_id.values())\n        if unused_python_objects:\n            raise AssertionError(f'Nothing except the root object matched a checkpointed value. Typically this means that the checkpoint does not match the Python program. The following objects have no matching checkpointed value: {list(unused_python_objects)}')\n        else:\n            raise AssertionError(f'Nothing to load. No dependencies have been added to {self._object_graph_view.root} yet.')\n    return self",
    "docstring": "Raises an exception if only the root object matched.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_nontrivial_match arg:self arguments arg For Call Call If Compare Call Assign Call Call Call Call If Raise Call Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_ticks_position",
    "source_code": "def get_ticks_position(self):\n    return self._tick_position",
    "docstring": "Get the ticks position. Returns ------- str : {'lower', 'upper', 'both', 'default', 'none'} The position of the bolded axis lines, ticks, and tick labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axis3d.py",
    "ast_data": "FunctionDef name:get_ticks_position arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_gradient_components",
    "source_code": "def get_gradient_components(self, value):\n    return value",
    "docstring": "Returns the components of that should be included in gradients. For a ResourceVariable, its gradient component is its handle tensor. For now, we return the ResourceVariable because the gradient infrastructure has special logic to handle ResourceVariables. We should remove the special logic and return the handle tensor. Args: value: A . Returns: itself.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:get_gradient_components arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "set_clip",
    "source_code": "def set_clip(self, screen_rect=None):\n    if screen_rect is None:\n        self._clip = pygame.display.get_surface().get_rect()\n    else:\n        self._clip = screen_rect\n    self._use_update = False",
    "docstring": "clip the area where to draw; pass None (default) to reset the clip LayeredDirty.set_clip(screen_rect=None): return None",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:set_clip arg:self arg:screen_rect arguments arg arg If Compare Assign Call Call Assign Assign"
  },
  {
    "library": "pandas",
    "name": "copy",
    "source_code": "def copy(self) -> Self:\n    return type(self)(self._pa_array)",
    "docstring": "Return a shallow copy of the array. Underlying ChunkedArray is immutable, so a deep copy is unnecessary. Returns ------- type(self)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:copy arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "wrap_single_convertor",
    "source_code": "def wrap_single_convertor(convert_single):\n\n    @functools.wraps(convert_single)\n    def __ua_convert__(dispatchables, coerce):\n        converted = []\n        for d in dispatchables:\n            c = convert_single(d.value, d.type, coerce and d.coercible)\n            if c is NotImplemented:\n                return NotImplemented\n            converted.append(c)\n        return converted\n    return __ua_convert__",
    "docstring": "Wraps a ``, the operation is assumed to be undefined. Accepts a signature of (value, type, coerce).",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py",
    "ast_data": "FunctionDef name:wrap_single_convertor arg:convert_single arguments arg FunctionDef name:__ua_convert__ arg:dispatchables arg:coerce arguments arg arg Assign For Assign Call BoolOp If Compare Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_skip_ticks",
    "source_code": "def _skip_ticks(self, labels, tickevery):\n    n = len(labels)\n    if tickevery == 0:\n        ticks, labels = ([], [])\n    elif tickevery == 1:\n        ticks, labels = (np.arange(n) + 0.5, labels)\n    else:\n        start, end, step = (0, n, tickevery)\n        ticks = np.arange(start, end, step) + 0.5\n        labels = labels[start:end:step]\n    return (ticks, labels)",
    "docstring": "Return ticks and labels at evenly spaced intervals.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\matrix.py",
    "ast_data": "FunctionDef name:_skip_ticks arg:self arg:labels arg:tickevery arguments arg arg arg Assign Call If Compare Assign If Compare Assign Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_name",
    "source_code": "def create_name(self, candidate: str, obj: Optional[Any]) -> str:\n    if obj is not None and obj in self._obj_to_name:\n        return self._obj_to_name[obj]\n    match = _name_regex.match(candidate)\n    if match is None:\n        candidate = _illegal_char_regex.sub('_', candidate)\n        if not candidate:\n            candidate = '_unnamed'\n        if candidate[0].isdigit():\n            candidate = f'_{candidate}'\n        match = _name_regex.match(candidate)\n        assert match is not None\n    base, num = match.group(1, 2)\n    if num is None or candidate in self._used_names:\n        num = self._base_count.get(candidate, 0)\n        if _illegal_names.get(candidate, obj) is not obj:\n            num += 1\n            candidate = f'{base}_{num}'\n    else:\n        num = int(num)\n    while candidate in self._used_names:\n        num += 1\n        candidate = f'{base}_{num}'\n    self._used_names.add(candidate)\n    self._base_count[base] = num\n    if obj is not None:\n        self._obj_to_name[obj] = candidate\n    return candidate",
    "docstring": "Create a unique name. Arguments: candidate: used as the basis for the unique name, relevant to the user. obj: If not None, an object that will be associated with the unique name.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:create_name arg:self arg:candidate arg:obj arguments arg arg arg If BoolOp Compare Compare Return return:yes Assign Call If Compare Assign Call If Assign If Call Assign Assign Call Compare Assign Call If BoolOp Compare Compare Assign Call If Compare Call Assign Assign Call While Compare Assign Call Assign If Compare Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, ratio):\n    self.ratio = ratio",
    "docstring": "create a new collide_rect_ratio callable Ratio is expected to be a floating point value used to scale the underlying sprite rect before checking for collisions.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ratio arguments arg arg Assign"
  },
  {
    "library": "django",
    "name": "getInnerText",
    "source_code": "def getInnerText(node):\n    inner_text = []\n    for child in node.childNodes:\n        if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:\n            inner_text.append(child.data)\n        elif child.nodeType == child.ELEMENT_NODE:\n            inner_text.extend(getInnerText(child))\n        else:\n            pass\n    return ''.join(inner_text)",
    "docstring": "Get all the inner text of a DOM node (recursively).",
    "type": "function",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:getInnerText arg:node arguments arg Assign For If BoolOp Compare Compare Call If Compare Call Call Return return:yes Call"
  },
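A sketch of the recursion on a small DOM tree, using the standard-library minidom (`getInnerText` lives in django.core.serializers.xml_serializer):

```python
from xml.dom.minidom import parseString
from django.core.serializers.xml_serializer import getInnerText

doc = parseString("<field>Hello <b>world</b></field>")
print(getInnerText(doc.documentElement))  # Hello world
```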
  {
    "library": "kornia",
    "name": "apply_transform_mask",
    "source_code": "def apply_transform_mask(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return self.apply_transform(input, params=params, flags=flags, transform=transform)",
    "docstring": "Process masks corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\elastic_transform.py",
    "ast_data": "FunctionDef name:apply_transform_mask arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "InflatableArg",
    "source_code": "class InflatableArg(NamedTuple):\n    value: Any\n    fmt: str = '{}'\n    fmt_fn: str = ''",
    "docstring": "Helper type for bundled inputs. 'value' is the compressed/deflated input that is stored in the model. Value must be of the same type as the argument to the function that it is a deflated input for. 'fmt' is a formatable code string that is executed to inflate the compressed data into the appropriate input. It can use 'value' as an input to the format str. It must result in a value of the same type as 'value'. 'fmt_fn' is a formatable function code string that is executed to inflate the compressed data into the appropriate input. It must result in a value of the same type as 'value'. The function name should be the formatable part of the string. Note: Only top level InflatableArgs can be inflated. i.e. you cannot place an inflatable arg inside of some other structure. You should instead create an inflatable arg such that the fmt code string returns the full structure of your input.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\bundled_inputs.py",
    "ast_data": "ClassDef name:InflatableArg"
  },
  {
    "library": "numpy",
    "name": "CCompiler_find_executables",
    "source_code": "def CCompiler_find_executables(self):\n    pass",
    "docstring": "Does nothing here, but is called by the get_version method and can be overridden by subclasses. In particular it is redefined in the class where more documentation can be found.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\ccompiler.py",
    "ast_data": "FunctionDef name:CCompiler_find_executables arg:self arguments arg"
  },
  {
    "library": "kornia",
    "name": "download_image",
    "source_code": "def download_image(url: str, save_to: str) -> None:\n    im = Image.open(requests.get(url, stream=True, timeout=30).raw)\n    im.save(save_to)",
    "docstring": "Download an image from a given URL and save it to a specified file path. Args: url: The URL of the image to download. save_to: The file path where the downloaded image will be saved.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\sample.py",
    "ast_data": "FunctionDef name:download_image arg:url arg:save_to arguments arg arg Assign Call Call Call"
  },
  {
    "library": "kornia",
    "name": "val2list",
    "source_code": "def val2list(x: Union[list[Any], tuple[Any, ...], Any], repeat_time: int=1) -> list[Any]:\n    if isinstance(x, list):\n        return x\n    elif isinstance(x, tuple):\n        return list(x)\n    else:\n        return [x] * repeat_time",
    "docstring": "Convert value to list.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\utils\\list.py",
    "ast_data": "FunctionDef name:val2list arg:x arg:repeat_time arguments arg arg If Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_create_backward_hook",
    "source_code": "def _create_backward_hook(self, name: str) -> Callable:\n\n    def _backward_hook(module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor) -> None:\n        self._cur_module_name = f'{name}.backward'\n    return _backward_hook",
    "docstring": "Insert the current module name with backward prefix for the operator name.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:_create_backward_hook arg:self arg:name arguments arg arg FunctionDef name:_backward_hook arg:module arg:grad_input arg:grad_output arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_original_model_type",
    "source_code": "@classmethod\ndef _set_original_model_type(cls, model_type):\n    if model_type == conversion_metadata_fb.ModelType.NONE:\n        raise ValueError('The original model type should be specified.')\n    cls._original_model_type = model_type",
    "docstring": "Stores the original model type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_set_original_model_type arg:cls arg:model_type arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "generate_square_subsequent_mask",
    "source_code": "@staticmethod\ndef generate_square_subsequent_mask(sz: int, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> Tensor:\n    return _generate_square_subsequent_mask(sz, dtype=dtype, device=device)",
    "docstring": "Generate a square causal mask for the sequence. The masked positions are filled with float('-inf'). Unmasked positions are filled with float(0.0).",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\transformer.py",
    "ast_data": "FunctionDef name:generate_square_subsequent_mask arg:sz arg:device arg:dtype arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "DeviceWrapper",
    "source_code": "@deprecated(None, 'Please use tf.keras.layers.RNN instead.')\n@tf_export('nn.RNNCellDeviceWrapper', v1=[])\nclass DeviceWrapper(rnn_cell_wrapper_impl.DeviceWrapperBase, _RNNCellWrapperV2):\n\n    def __init__(self, *args, **kwargs):\n        super(DeviceWrapper, self).__init__(*args, **kwargs)\n    __init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__",
    "docstring": "Operator that ensures an RNNCell runs on a particular device.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\rnn_cell_wrapper_v2.py",
    "ast_data": "ClassDef name:DeviceWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call Call"
  },
  {
    "library": "django",
    "name": "CallableSettingWrapper",
    "source_code": "class CallableSettingWrapper:\n\n    def __init__(self, callable_setting):\n        self._wrapped = callable_setting\n\n    def __repr__(self):\n        return repr(self._wrapped)",
    "docstring": "Object to wrap callable appearing in settings. * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070).",
    "type": "class",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "ClassDef name:CallableSettingWrapper FunctionDef name:__init__ arg:self arg:callable_setting arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_concat",
    "source_code": "def _concat(self):\n    if len(self._variable_list) == 1:\n        with ops.name_scope(None):\n            return array_ops.identity(self._variable_list[0], name=self._name)\n    partition_axes = self._partition_axes()\n    if len(partition_axes) > 1:\n        raise NotImplementedError('Cannot concatenate along more than one dimension: %s.  Multi-axis partition concat is not supported' % str(partition_axes))\n    partition_ix = partition_axes[0]\n    with ops.name_scope(self._name + '/ConcatPartitions/'):\n        concatenated = array_ops.concat(self._variable_list, partition_ix)\n    with ops.name_scope(None):\n        return array_ops.identity(concatenated, name=self._name)",
    "docstring": "Returns the overall concatenated value as a . This is different from using the partitioned variable directly as a tensor (through tensor conversion and ) in that it creates a new set of operations that keeps the control dependencies from its scope. Returns: containing the concatenated value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_concat arg:self arguments arg If Compare Call With Call Return return:yes Call Assign Call If Compare Call Raise Call Call Assign With Call Assign Call With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_to_xy",
    "source_code": "def _to_xy(self, values, const):\n    if self.nth_coord == 0:\n        return np.stack(np.broadcast_arrays(values, const), axis=-1)\n    elif self.nth_coord == 1:\n        return np.stack(np.broadcast_arrays(const, values), axis=-1)\n    else:\n        raise ValueError('Unexpected nth_coord')",
    "docstring": "Create a (*values.shape, 2)-shape array representing (x, y) pairs. The other coordinate is filled with the constant *const*. Example:: >>> self.nth_coord = 0 >>> self._to_xy([1, 2, 3], const=0) array([[1, 0], [2, 0], [3, 0]])",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py",
    "ast_data": "FunctionDef name:_to_xy arg:self arg:values arg:const arguments arg arg arg If Compare Return return:yes Call Call If Compare Return return:yes Call Call Raise Call"
  },
  {
    "library": "numpy",
    "name": "legcompanion",
    "source_code": "def legcompanion(c):\n    [c] = pu.as_series([c])\n    if len(c) < 2:\n        raise ValueError('Series must have maximum degree of at least 1.')\n    if len(c) == 2:\n        return np.array([[-c[0] / c[1]]])\n    n = len(c) - 1\n    mat = np.zeros((n, n), dtype=c.dtype)\n    scl = 1.0 / np.sqrt(2 * np.arange(n) + 1)\n    top = mat.reshape(-1)[1::n + 1]\n    bot = mat.reshape(-1)[n::n + 1]\n    top[...] = np.arange(1, n) * scl[:n - 1] * scl[1:n]\n    bot[...] = top\n    mat[:, -1] -= c[:-1] / c[-1] * (scl / scl[-1]) * (n / (2 * n - 1))\n    return mat",
    "docstring": "Return the scaled companion matrix of c. The basis polynomials are scaled so that the companion matrix is symmetric when is an Legendre basis polynomial. This provides better eigenvalue estimates than the unscaled case and for basis polynomials the eigenvalues are guaranteed to be real if is used to obtain them. Parameters ---------- c : array_like 1-D array of Legendre series coefficients ordered from low to high degree. Returns ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg).",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\legendre.py",
    "ast_data": "FunctionDef name:legcompanion arg:c arguments arg Assign Call If Compare Call Raise Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype_to_etype",
    "source_code": "def dtype_to_etype(dtype):\n    return DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]",
    "docstring": "Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "FunctionDef name:dtype_to_etype arg:dtype arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "GaussianBlur2d",
    "source_code": "class GaussianBlur2d(Module):\n\n    def __init__(self, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str='reflect', separable: bool=True) -> None:\n        super().__init__()\n        self.kernel_size = kernel_size\n        self.sigma = sigma\n        self.border_type = border_type\n        self.separable = separable\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(kernel_size={self.kernel_size}, sigma={self.sigma}, border_type={self.border_type}, separable={self.separable})'\n\n    def forward(self, input: Tensor) -> Tensor:\n        return gaussian_blur2d(input, self.kernel_size, self.sigma, self.border_type, self.separable)",
    "docstring": "Create an operator that blurs a tensor using a Gaussian filter. The operator smooths the given tensor with a gaussian kernel by convolving it to each channel. It supports batched operation. Arguments: kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H, W)` Examples:: >>> input = torch.rand(2, 4, 5, 5) >>> gauss = GaussianBlur2d((3, 3), (1.5, 1.5)) >>> output = gauss(input) # 2x4x5x5 >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "class",
    "file_path": "kornia\\kornia\\filters\\gaussian.py",
    "ast_data": "ClassDef name:GaussianBlur2d FunctionDef name:__init__ arg:self arg:kernel_size arg:sigma arg:border_type arg:separable arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "character_backward_compatibility_hook",
    "source_code": "def character_backward_compatibility_hook(item, parents, result, *args, **kwargs):\n    parent_key, parent_value = parents[-1]\n    key, value = item\n\n    def fix_usage(varname, value):\n        value = re.sub('[*]\\\\s*\\\\b' + varname + '\\\\b', varname, value)\n        value = re.sub('\\\\b' + varname + '\\\\b\\\\s*[\\\\[]\\\\s*0\\\\s*[\\\\]]', varname, value)\n        return value\n    if parent_key in ['dimension', 'check']:\n        assert parents[-3][0] == 'vars'\n        vars_dict = parents[-3][1]\n    elif key == '=':\n        assert parents[-2][0] == 'vars'\n        vars_dict = parents[-2][1]\n    else:\n        vars_dict = None\n    new_value = None\n    if vars_dict is not None:\n        new_value = value\n        for varname, vd in vars_dict.items():\n            if ischaracter(vd):\n                new_value = fix_usage(varname, new_value)\n    elif key == 'callstatement':\n        vars_dict = parents[-2][1]['vars']\n        new_value = value\n        for varname, vd in vars_dict.items():\n            if ischaracter(vd):\n                new_value = re.sub('(?<![&])\\\\b' + varname + '\\\\b', '&' + varname, new_value)\n    if new_value is not None:\n        if new_value != value:\n            outmess(f'character_bc_hook[{parent_key}.{key}]: replaced `{value}` -> `{new_value}`\\n', 1)\n        return (key, new_value)",
    "docstring": "Previously, Fortran character was incorrectly treated as character*1. This hook fixes the usage of the corresponding variables in , , , and expressions. The usage of in expression can be left unchanged because C is C typedef of , although, new implementations should use in the corresponding expressions. See for more information.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\crackfortran.py",
    "ast_data": "FunctionDef name:character_backward_compatibility_hook arg:item arg:parents arg:result arguments arg arg arg arg arg Assign Assign FunctionDef name:fix_usage arg:varname arg:value arguments arg arg Assign Call Assign Call Return return:yes If Compare Compare Assign If Compare Compare Assign Assign Assign If Compare Assign For Call If Call Assign Call If Compare Assign Assign For Call If Call Assign Call If Compare If Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_large_sparse",
    "source_code": "def _check_large_sparse(X, accept_large_sparse=False):\n    if not accept_large_sparse:\n        supported_indices = ['int32']\n        if X.format == 'coo':\n            index_keys = ['col', 'row']\n        elif X.format in ['csr', 'csc', 'bsr']:\n            index_keys = ['indices', 'indptr']\n        else:\n            return\n        for key in index_keys:\n            indices_datatype = getattr(X, key).dtype\n            if indices_datatype not in supported_indices:\n                raise ValueError(f'Only sparse matrices with 32-bit integer indices are accepted. Got {indices_datatype} indices. Please do report a minimal reproducer on scikit-learn issue tracker so that support for your use-case can be studied by maintainers. See: https://scikit-learn.org/dev/developers/minimal_reproducer.html')",
    "docstring": "Raise a ValueError if X has 64bit indices and accept_large_sparse=False",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_large_sparse arg:X arg:accept_large_sparse arguments arg arg If Assign If Compare Assign If Compare Assign Return return:no For Assign Call If Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "reserveObject",
    "source_code": "def reserveObject(self, name=''):\n    id = next(self._object_seq)\n    self.xrefTable.append([None, 0, name])\n    return Reference(id)",
    "docstring": "Reserve an ID for an indirect object. The name is used for debugging in case we forget to print out the object with writeObject.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:reserveObject arg:self arg:name arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_normalize",
    "source_code": "def maybe_normalize(arg, parm):\n    normalizer = normalizers.get(parm.annotation, None)\n    return normalizer(arg, parm) if normalizer else arg",
    "docstring": "Normalize arg if a normalizer is registered.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_normalizations.py",
    "ast_data": "FunctionDef name:maybe_normalize arg:arg arg:parm arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_efficient_attention_forward_flop",
    "source_code": "@register_flop_formula(aten._efficient_attention_forward, get_raw=True)\ndef _efficient_attention_forward_flop(query, key, value, bias, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, *args, **kwargs) -> int:\n    sizes = _unpack_efficient_attention_nested_shapes(query=query, key=key, value=value, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k)\n    return sum((sdpa_flop_count(query_shape, key_shape, value_shape) for query_shape, key_shape, value_shape, _ in sizes))",
    "docstring": "Count flops for self-attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:_efficient_attention_forward_flop arg:query arg:key arg:value arg:bias arg:cu_seqlens_q arg:cu_seqlens_k arg:max_seqlen_q arg:max_seqlen_k arguments arg arg arg arg arg arg arg arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "handle_spider_output_async",
    "source_code": "async def handle_spider_output_async(self, result: Iterable[_T] | AsyncIterator[_T], request: Request, response: Response) -> None:\n    if isinstance(result, AsyncIterator):\n        ait = aiter_errback(result, self.handle_spider_error, request, response)\n        await maybe_deferred_to_future(parallel_async(ait, self.concurrent_items, self._process_spidermw_output, response))\n        return\n    it = iter_errback(result, self.handle_spider_error, request, response)\n    await maybe_deferred_to_future(parallel(it, self.concurrent_items, self._process_spidermw_output, response))",
    "docstring": "Pass items/requests produced by a callback to `` in parallel.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "AsyncFunctionDef name:handle_spider_output_async arg:self arg:result arg:request arg:response arguments arg arg arg arg If Call Assign Call Call Call Return return:no Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "lower_to_qnnpack",
    "source_code": "def lower_to_qnnpack(model: GraphModule, qconfig_map: dict[str, QConfigAny], node_name_to_scope: dict[str, tuple[str, type]]) -> GraphModule:\n    return _lower_to_native_backend(model, qconfig_map, node_name_to_scope)",
    "docstring": "Lower a quantized reference model (with reference quantized operator patterns) to qnnpack",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\lower_to_qnnpack.py",
    "ast_data": "FunctionDef name:lower_to_qnnpack arg:model arg:qconfig_map arg:node_name_to_scope arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_LessThan",
    "source_code": "class _LessThan(Constraint):\n\n    def __init__(self, upper_bound):\n        self.upper_bound = upper_bound\n        super().__init__()\n\n    def check(self, value):\n        return value < self.upper_bound\n\n    def __repr__(self):\n        fmt_string = self.__class__.__name__[1:]\n        fmt_string += f'(upper_bound={self.upper_bound})'\n        return fmt_string",
    "docstring": "Constrain to a real half line .",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_LessThan FunctionDef name:__init__ arg:self arg:upper_bound arguments arg arg Assign Call Call FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Compare FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_texts",
    "source_code": "def get_texts(self):\n    return silent_list('Text', self.texts)",
    "docstring": "Return the list of \\s in the legend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_texts arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "AOTICallDelegate",
    "source_code": "class AOTICallDelegate(HigherOrderOperator):\n\n    def __init__(self) -> None:\n        super().__init__('aoti_call_delegate')\n\n    def __call__(self, lowered_module: AOTI_LOWERED_MODULE, original_gm: torch.fx.GraphModule, weight_args: list[torch.Tensor], input_args: list[torch.Tensor]) -> list[torch.Tensor]:\n        return super().__call__(lowered_module, original_gm, weight_args, input_args)",
    "docstring": "aoti_call_delegate is a HOP for calling AOTInductor lowered submodule in ExportedProgram. It has the following signature: aoti_call_delegate( lowered_module: Union[AOTInductorEPModule, AOTInductorRunnerWrapper] original_gm:fx.GraphModule, weight_args: List[Tensor], input_args: List[Tensor], ) -> outputs: List[Tensor] where, - lowered_module is the AOTInductor lowered submodule, backed by compiled .so file, supporting real tensor inputs - original_gm is the stateless version of the original GraphModule before lowering, allowing FakeTensor propagation - weight_args is the list of weights in original GraphModule, including parameters and buffers - input_args is the list of flatten inputs",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\aoti_call_delegate.py",
    "ast_data": "ClassDef name:AOTICallDelegate FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:self arg:lowered_module arg:original_gm arg:weight_args arg:input_args arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_disable_inference_mode_for_fake_prop",
    "source_code": "@contextmanager\ndef maybe_disable_inference_mode_for_fake_prop() -> Generator[None, None, None]:\n    if config.fake_tensor_disable_inference_mode:\n        with torch._subclasses.meta_utils.disable_inference_mode_for_fake_prop():\n            yield\n    else:\n        yield",
    "docstring": "Turns off tracking of inference_mode for fake tensor propagation. With this context manager, when a real tensor is converted to fake tensor, the fake tensor looses its inference-ness.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:maybe_disable_inference_mode_for_fake_prop arguments If With Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "def serialize(optimizer):\n    return serialize_keras_object(optimizer)",
    "docstring": "Serialize the optimizer configuration to JSON compatible python dict. The configuration can be used for persistence and reconstruct the instance again. >>> tf.keras.optimizers.serialize(tf.keras.optimizers.SGD()) {'class_name': 'SGD', 'config': {'name': 'SGD', 'learning_rate': 0.01, 'decay': 0.0, 'momentum': 0.0, 'nesterov': False}} Args: optimizer: An instance to serialize. Returns: Python dict which contains the configuration of the input optimizer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizers.py",
    "ast_data": "FunctionDef name:serialize arg:optimizer arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sample",
    "source_code": "def sample(self, sample_shape=(), seed=None, name='sample'):\n    return self._call_sample_n(sample_shape, seed, name)",
    "docstring": "Generate samples of the specified shape. Note that a call to without arguments will generate a single sample. Args: sample_shape: 0D or 1D . Shape of the generated samples. seed: Python integer seed for RNG name: name to give to the op. Returns: samples: a with prepended dimensions .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:sample arg:self arg:sample_shape arg:seed arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "max_name_length",
    "source_code": "def max_name_length(self):\n    return None",
    "docstring": "Return the maximum length of table and column names, or None if there is no limit.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:max_name_length arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_compute_colocation_summary_from_dict",
    "source_code": "def _compute_colocation_summary_from_dict(name, colocation_dict, prefix=''):\n    if not colocation_dict:\n        message = \"No node-device colocations were active during op '%s' creation.\"\n        message %= name\n        return prefix + message\n    str_list = []\n    str_list.append(\"%sNode-device colocations active during op '%s' creation:\" % (prefix, name))\n    for coloc_name, location in colocation_dict.items():\n        location_summary = '<{file}:{line}>'.format(file=location.filename, line=location.lineno)\n        subs = {'prefix': prefix, 'indent': '  ', 'name': coloc_name, 'loc': location_summary}\n        str_list.append('{prefix}{indent}with tf.colocate_with({name}): {loc}'.format(**subs))\n    return '\\n'.join(str_list)",
    "docstring": "Return a summary of an op's colocation stack. Args: name: The op name. colocation_dict: The op._colocation_dict. prefix: An optional string prefix used before each line of the multi- line string returned by this function. Returns: A multi-line string similar to: Node-device colocations active during op creation: with tf.compat.v1.colocate_with(test_node_1): with tf.compat.v1.colocate_with(test_node_2): The first line will have no padding to its left by default. Subsequent lines will have two spaces of left-padding. Use the prefix argument to increase indentation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\error_interpolation.py",
    "ast_data": "FunctionDef name:_compute_colocation_summary_from_dict arg:name arg:colocation_dict arg:prefix arguments arg arg arg If Assign Return return:yes Assign Call For Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "format_ydata",
    "source_code": "def format_ydata(self, y):\n    return (self.fmt_ydata if self.fmt_ydata is not None else self.yaxis.get_major_formatter().format_data_short)(y)",
    "docstring": "Return *y* formatted as a y-value. This function will use the attribute if it is not None, else will fall back on the yaxis major formatter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:format_ydata arg:self arg:y arguments arg arg Return return:yes Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "output_mask",
    "source_code": "@property\ndef output_mask(self):\n    output = self.output\n    if isinstance(output, list):\n        return [getattr(x, '_keras_mask', None) for x in output]\n    else:\n        return getattr(output, '_keras_mask', None)",
    "docstring": "Retrieves the output mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Output mask tensor (potentially None) or list of output mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:output_mask arg:self arguments arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "call_function",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_function(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Any:\n    assert not isinstance(target, str)\n    return target(*args, **kwargs)",
    "docstring": "Execute a `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return Any: The value returned by the function invocation",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:call_function arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_ttest_trim_var_mean_len",
    "source_code": "def _ttest_trim_var_mean_len(a, trim, axis):\n    a = np.sort(a, axis=axis)\n    n = a.shape[axis]\n    g = int(n * trim)\n    v = _calculate_winsorized_variance(a, g, axis)\n    n -= 2 * g\n    m = trim_mean(a, trim, axis=axis)\n    return (v, m, n)",
    "docstring": "Variance, mean, and length of winsorized input along specified axis",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:_ttest_trim_var_mean_len arg:a arg:trim arg:axis arguments arg arg arg Assign Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_multilayer",
    "source_code": "@classmethod\ndef create_multilayer(cls, device: torch.device, dst_dtype: torch.dtype, src_dtype: torch.dtype, inner_fn: Callable[..., Any], ranges: Sequence[Expr], reduction_ranges: Sequence[Expr], reduction_type: ReductionType, split: _IntLike, reduction_hint: ReductionHint, input_node: Optional[IRNode]=None) -> TensorBox:\n    reduction_numel = sympy_product(reduction_ranges)\n    block_size = FloorDiv(reduction_numel + (split - 1), split)\n    default = cls.default_value(reduction_type, dst_dtype)\n    wrapper_fn = cls._multilayer_wrap_loader(inner_fn, reduction_ranges, reduction_numel, split, block_size, default, input_node)\n    return cls.create_multilayer_helper(device, dst_dtype, src_dtype, wrapper_fn, ranges, reduction_ranges, [*ranges, split], [block_size], reduction_type, split, reduction_hint)",
    "docstring": "Break a large reduction up into multiple smaller reductions recursively",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:create_multilayer arg:cls arg:device arg:dst_dtype arg:src_dtype arg:inner_fn arg:ranges arg:reduction_ranges arg:reduction_type arg:split arg:reduction_hint arg:input_node arguments arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_checkpoint_dependencies",
    "source_code": "@property\ndef _checkpoint_dependencies(self):\n    return self._self_unconditional_checkpoint_dependencies",
    "docstring": "All dependencies of this object. May be overridden to include conditional dependencies. Returns: A list of objects indicating named dependencies which should be saved along with this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_checkpoint_dependencies arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_delta_cdf",
    "source_code": "def _delta_cdf(self, x1, x2, *args, loc=0, scale=1):\n    cdf1 = self.cdf(x1, *args, loc=loc, scale=scale)\n    result = np.where(cdf1 > 0.5, self.sf(x1, *args, loc=loc, scale=scale) - self.sf(x2, *args, loc=loc, scale=scale), self.cdf(x2, *args, loc=loc, scale=scale) - cdf1)\n    if result.ndim == 0:\n        result = result[()]\n    return result",
    "docstring": "Compute CDF(x2) - CDF(x1). Where x1 is greater than the median, compute SF(x1) - SF(x2), otherwise compute CDF(x2) - CDF(x1). This function is only useful if has an implementation that is numerically more accurate than .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_delta_cdf arg:self arg:x1 arg:x2 arguments arg arg arg arg arg arg Assign Call Assign Call Compare Call Call Call If Compare Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *, edgecolor=None, facecolor=None, color=None, linewidth=None, linestyle=None, antialiased=None, hatch=None, fill=True, capstyle=None, joinstyle=None, hatchcolor=None, **kwargs):\n    super().__init__()\n    if linestyle is None:\n        linestyle = 'solid'\n    if capstyle is None:\n        capstyle = CapStyle.butt\n    if joinstyle is None:\n        joinstyle = JoinStyle.miter\n    self._hatch_linewidth = mpl.rcParams['hatch.linewidth']\n    self._fill = bool(fill)\n    if color is not None:\n        if edgecolor is not None or facecolor is not None:\n            _api.warn_external(\"Setting the 'color' property will override the edgecolor or facecolor properties.\")\n        self.set_color(color)\n    else:\n        self.set_edgecolor(edgecolor)\n        self.set_hatchcolor(hatchcolor)\n        self.set_facecolor(facecolor)\n    self._linewidth = 0\n    self._unscaled_dash_pattern = (0, None)\n    self._dash_pattern = (0, None)\n    self.set_linestyle(linestyle)\n    self.set_linewidth(linewidth)\n    self.set_antialiased(antialiased)\n    self.set_hatch(hatch)\n    self.set_capstyle(capstyle)\n    self.set_joinstyle(joinstyle)\n    if len(kwargs):\n        self._internal_update(kwargs)",
    "docstring": "The following kwarg properties are supported %(Patch:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call If Compare Assign If Compare Assign If Compare Assign Assign Assign Call If Compare If BoolOp Compare Compare Call Call Call Call Call Assign Assign Assign Call Call Call Call Call Call If Call Call"
  },
  {
    "library": "django",
    "name": "adapt_unknown_value",
    "source_code": "def adapt_unknown_value(self, value):\n    if isinstance(value, datetime.datetime):\n        return self.adapt_datetimefield_value(value)\n    elif isinstance(value, datetime.date):\n        return self.adapt_datefield_value(value)\n    elif isinstance(value, datetime.time):\n        return self.adapt_timefield_value(value)\n    elif isinstance(value, decimal.Decimal):\n        return self.adapt_decimalfield_value(value)\n    else:\n        return value",
    "docstring": "Transform a value to something compatible with the backend driver. This method only depends on the type of the value. It's designed for cases where the target type isn't known, such as .raw() SQL queries. As a consequence it may not work perfectly in all circumstances.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:adapt_unknown_value arg:self arg:value arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_fix_output",
    "source_code": "def _fix_output(output, usemask=True, asrecarray=False):\n    if not isinstance(output, ma.MaskedArray):\n        usemask = False\n    if usemask:\n        if asrecarray:\n            output = output.view(mrec.MaskedRecords)\n    else:\n        output = ma.filled(output)\n        if asrecarray:\n            output = output.view(np.recarray)\n    return output",
    "docstring": "Private function: return a recarray, a ndarray, a MaskedArray or a MaskedRecords depending on the input parameters",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:_fix_output arg:output arg:usemask arg:asrecarray arguments arg arg arg If Call Assign If If Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sequence_categorical_column_with_identity",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.sequence_categorical_column_with_identity')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef sequence_categorical_column_with_identity(key, num_buckets, default_value=None):\n    return fc.SequenceCategoricalColumn(fc.categorical_column_with_identity(key=key, num_buckets=num_buckets, default_value=default_value))",
    "docstring": "Returns a feature column that represents sequences of integers. Pass this to or to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: Args: key: A unique string identifying the input feature. num_buckets: Range of inputs. Namely, inputs are expected to be in the range . default_value: If , this column's graph operations will fail for out-of-range inputs. Otherwise, this value must be in the range , and will replace out-of-range inputs. Returns: A . Raises: ValueError: if is less than one. ValueError: if is not in range .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:sequence_categorical_column_with_identity arg:key arg:num_buckets arg:default_value arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "preprocess_inputs_arg_string",
    "source_code": "def preprocess_inputs_arg_string(inputs_str):\n    input_dict = {}\n    inputs_raw = inputs_str.split(';')\n    for input_raw in filter(bool, inputs_raw):\n        match = re.match('([^=]+)=([^\\\\[\\\\]]+)\\\\[([^\\\\[\\\\]]+)\\\\]$', input_raw)\n        if match:\n            input_dict[match.group(1)] = (match.group(2), match.group(3))\n        else:\n            match = re.match('([^=]+)=([^\\\\[\\\\]]+)$', input_raw)\n            if match:\n                input_dict[match.group(1)] = (match.group(2), None)\n            else:\n                raise RuntimeError('--inputs \"%s\" format is incorrect. Please follow\"<input_key>=<filename>\", or\"<input_key>=<filename>[<variable_name>]\"' % input_raw)\n    return input_dict",
    "docstring": "Parses input arg into dictionary that maps input to file/variable tuple. Parses input string in the format of, for example, \"input1=filename1[variable_name1],input2=filename2\" into a dictionary looks like {'input_key1': (filename1, variable_name1), 'input_key2': (file2, None)} , which maps input keys to a tuple of file name and variable name(None if empty). Args: inputs_str: A string that specified where to load inputs. Inputs are separated by semicolons. * For each input key: '=' or '=[]' * The optional 'variable_name' key will be set to None if not specified. Returns: A dictionary that maps input keys to a tuple of file name and variable name. Raises: RuntimeError: An error when the given input string is in a bad format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:preprocess_inputs_arg_string arg:inputs_str arguments arg Assign Assign Call For Call Assign Call If Assign Call Call Call Assign Call If Assign Call Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "ensure_index",
    "source_code": "def ensure_index(index_like: Axes, copy: bool=False) -> Index:\n    if isinstance(index_like, Index):\n        if copy:\n            index_like = index_like.copy()\n        return index_like\n    if isinstance(index_like, ABCSeries):\n        name = index_like.name\n        return Index(index_like, name=name, copy=copy)\n    if is_iterator(index_like):\n        index_like = list(index_like)\n    if isinstance(index_like, list):\n        if type(index_like) is not list:\n            index_like = list(index_like)\n        if len(index_like) and lib.is_all_arraylike(index_like):\n            from pandas.core.indexes.multi import MultiIndex\n            return MultiIndex.from_arrays(index_like)\n        else:\n            return Index(index_like, copy=copy, tupleize_cols=False)\n    else:\n        return Index(index_like, copy=copy)",
    "docstring": "Ensure that we have an index from some index-like object. Parameters ---------- index_like : sequence An Index or other sequence copy : bool, default False Returns ------- index : Index or MultiIndex See Also -------- ensure_index_from_sequences Examples -------- >>> ensure_index([\"a\", \"b\"]) Index(['a', 'b'], dtype='object') >>> ensure_index([(\"a\", \"a\"), (\"b\", \"c\")]) Index([('a', 'a'), ('b', 'c')], dtype='object') >>> ensure_index([[\"a\", \"a\"], [\"b\", \"c\"]]) MultiIndex([('a', 'b'), ('a', 'c')], )",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:ensure_index arg:index_like arg:copy arguments arg arg If Call If Assign Call Return return:yes If Call Assign Return return:yes Call If Call Assign Call If Call If Compare Call Assign Call If BoolOp Call Call Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_grant_types_supported",
    "source_code": "def validate_grant_types_supported(self):\n    validate_array_value(self, 'grant_types_supported')",
    "docstring": "OPTIONAL. JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports. The array values used are the same as those used with the \"grant_types\" parameter defined by \"OAuth 2.0 Dynamic Client Registration Protocol\" [RFC7591]. If omitted, the default value is \"[\"authorization_code\", \"implicit\"]\".",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_grant_types_supported arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_exp_sinch",
    "source_code": "def _exp_sinch(a, x):\n    if abs(x) < 0.0135:\n        x2 = x * x\n        return np.exp(a) * (1 + x2 / 6.0 * (1 + x2 / 20.0 * (1 + x2 / 42.0)))\n    else:\n        return (np.exp(a + x) - np.exp(a - x)) / (2 * x)",
    "docstring": "Stably evaluate exp(a)*sinh(x)/x Notes ----- The strategy of falling back to a sixth order Taylor expansion was suggested by the Spallation Neutron Source docs which was found on the internet by google search. The details of the cutoff point and the Horner-like evaluation was picked without reference to anything in particular. Note that sinch is not currently implemented in scipy.special, whereas the \"engineer's\" definition of sinc is implemented. The implementation of sinc involves a scaling factor of pi that distinguishes it from the \"mathematician's\" version of sinc.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_matfuncs.py",
    "ast_data": "FunctionDef name:_exp_sinch arg:a arg:x arguments arg arg If Compare Call Assign Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_trace",
    "source_code": "def set_trace(gm: fx.GraphModule) -> fx.GraphModule:\n\n    def insert_pdb(body: Sequence[str]) -> list[str]:\n        return ['import pdb; pdb.set_trace()\\n', *body]\n    with gm.graph.on_generate_code(make_transformer=lambda cur_transform: lambda body: insert_pdb(cur_transform(body) if cur_transform else body)):\n        gm.recompile()\n    return gm",
    "docstring": "Sets a breakpoint in 's generated python code. It drops into pdb when gets run. Args: gm: graph module to insert breakpoint. It is then recompiled for it to take effect. Returns: the with breakpoint inserted.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\debug.py",
    "ast_data": "FunctionDef name:set_trace arg:gm arguments arg FunctionDef name:insert_pdb arg:body arguments arg Return return:yes With Call arguments arg arguments arg Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_dfs_get_attr_dependency",
    "source_code": "def _dfs_get_attr_dependency(entry):\n    for node in entry.nodes():\n        if node.kind() == 'prim::GetAttr':\n            irv_name, irv_parent_name, attr_name = get_ir_value_parent_name_and_attr_name(node)\n            node_to_parent_map[irv_name] = irv_parent_name\n            node_to_attr_name[irv_name] = attr_name\n        for block in node.blocks():\n            _dfs_get_attr_dependency(block)",
    "docstring": "First DFS path to construct reference map and name map.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\converter.py",
    "ast_data": "FunctionDef name:_dfs_get_attr_dependency arg:entry arguments arg For Call If Compare Call Assign Call Assign Assign For Call Call"
  },
  {
    "library": "scipy",
    "name": "_binopt",
    "source_code": "def _binopt(self, other, op):\n    other = self.__class__(other)\n    fn = getattr(_sparsetools, 'csr' + op + 'csr')\n    maxnnz = self.nnz + other.nnz\n    idx_dtype = self._get_index_dtype((self.indptr, self.indices, other.indptr, other.indices), maxval=maxnnz)\n    indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n    indices = np.empty(maxnnz, dtype=idx_dtype)\n    bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n    if op in bool_ops:\n        data = np.empty(maxnnz, dtype=np.bool_)\n    else:\n        data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))\n    M, N = self._swap(self._shape_as_2d)\n    fn(M, N, np.asarray(self.indptr, dtype=idx_dtype), np.asarray(self.indices, dtype=idx_dtype), self.data, np.asarray(other.indptr, dtype=idx_dtype), np.asarray(other.indices, dtype=idx_dtype), other.data, indptr, indices, data)\n    A = self.__class__((data, indices, indptr), shape=self.shape)\n    A.prune()\n    return A",
    "docstring": "apply the binary operation fn to two sparse matrices.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_binopt arg:self arg:other arg:op arguments arg arg arg Assign Call Assign Call Assign Assign Call Assign Call Assign Call Assign If Compare Assign Call Assign Call Call Assign Call Call Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "dequeue_if_needed",
    "source_code": "def dequeue_if_needed(self) -> Optional[torch.Event]:\n    if len(self._queue) >= self._max_num_inflight_all_gathers:\n        return self._dequeue()\n    return None",
    "docstring": "Dequeues a single event if the limit is reached.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_limiter_utils.py",
    "ast_data": "FunctionDef name:dequeue_if_needed arg:self arguments arg If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_convert_mapping",
    "source_code": "def _convert_mapping(value, expected_type, path, context):\n    if not isinstance(value, typing.Mapping):\n        raise TypeError(f'{''.join(path)}: expected mapping, got {type(value).__name__!r}')\n    key_type, value_type = type_annotations.get_generic_type_args(expected_type)\n    return immutable_dict.ImmutableDict([(_convert_value(k, key_type, path + ('[<key>]',), context), _convert_value(v, value_type, path + (f'[{k!r}]',), context)) for k, v in value.items()])",
    "docstring": "Converts to a mapping with type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type_field.py",
    "ast_data": "FunctionDef name:_convert_mapping arg:value arg:expected_type arg:path arg:context arguments arg arg arg arg If Call Raise Call Call Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "start_monitor",
    "source_code": "@no_type_check\ndef start_monitor(self, root_module: nn.Module) -> None:\n    self._clear_state()\n    root_module.__setattr__('_memory_tracker_is_root', True)\n    for name, m in root_module.named_modules():\n        if m is not root_module:\n            m.__setattr__('_memory_tracker_is_root', False)\n        if '.fused_proxy_grouped_embedding_bag' in name:\n            continue\n        h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name))\n        h2 = m.register_forward_hook(self._create_post_forward_hook(name))\n        self._hooks.extend([h1, h2])\n    torch.cuda.empty_cache()\n    assert getattr(self, 'profile_mode', None) is None\n    self.profile_mode = MemoryProfileDispatchMode(self)\n    self.profile_mode.__enter__()",
    "docstring": "Register module hooks and entering ``. This enables operator level memory stats can be tracked during module runtime.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:start_monitor arg:self arg:root_module arguments arg arg Call Call For Call If Compare Call If Compare Assign Call Call Assign Call Call Call Call Compare Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_format_axes",
    "source_code": "def _format_axes(self) -> None:\n    if not self.obj.index.is_unique and self.orient in ('index', 'columns'):\n        raise ValueError(f\"DataFrame index must be unique for orient='{self.orient}'.\")\n    if not self.obj.columns.is_unique and self.orient in ('index', 'columns', 'records'):\n        raise ValueError(f\"DataFrame columns must be unique for orient='{self.orient}'.\")",
    "docstring": "Try to format axes if they are datelike.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:_format_axes arg:self arguments arg If BoolOp Compare Raise Call If BoolOp Compare Raise Call"
  },
  {
    "library": "django",
    "name": "array",
    "source_code": "@property\ndef array(self):\n    return self._listarr(self._cs.__getitem__)",
    "docstring": "Return a numpy array for the LineString.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:array arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_is_scalar_access",
    "source_code": "def _is_scalar_access(self, key: tuple) -> bool:\n    if len(key) != self.ndim:\n        return False\n    for i, k in enumerate(key):\n        if not is_scalar(k):\n            return False\n        ax = self.obj.axes[i]\n        if isinstance(ax, MultiIndex):\n            return False\n        if isinstance(k, str) and ax._supports_partial_string_indexing:\n            return False\n        if not ax._index_as_unique:\n            return False\n    return True",
    "docstring": "Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_is_scalar_access arg:self arg:key arguments arg arg If Compare Call Return return:yes For Call If Call Return return:yes Assign If Call Return return:yes If BoolOp Call Return return:yes If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initializer, default_value, name=None, experimental_is_anonymous=False):\n    self._initializer = initializer\n    self._default_value = default_value\n    self._is_anonymous = experimental_is_anonymous\n    if not self._is_anonymous:\n        self._shared_name = self._initializer._shared_name\n        if not self._shared_name:\n            self._shared_name = 'hash_table_%s' % (str(uuid.uuid4()),)\n    self._name = name or 'hash_table'\n    self._table_name = None\n    super(StaticHashTable, self).__init__(default_value, initializer)\n    self._value_shape = self._default_value.get_shape()",
    "docstring": "Creates a non-initialized object. Creates a table, the type of its keys and values are specified by the initializer. Before using the table you will have to initialize it. After initialization the table will be immutable. Args: initializer: The table initializer to use. See kernel for supported key and value types. default_value: The value to use if a key is missing in the table. name: A name for the operation (optional). experimental_is_anonymous: Whether to use anonymous mode for the table (default is False). In anonymous mode, the table resource can only be accessed via a resource handle. It can't be looked up by a name. When all resource handles pointing to that resource are gone, the resource will be deleted automatically. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initializer arg:default_value arg:name arg:experimental_is_anonymous arguments arg arg arg arg arg Assign Assign Assign If Assign If Assign Call Call Assign BoolOp Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "estimate_kernel_num_bytes",
    "source_code": "def estimate_kernel_num_bytes(self):\n    nbytes = []\n    ninplace_args = len(unique(self.args.inplace_buffers.values()))\n    _, call_args, _, _ = self.args.python_argdefs()\n    buf_accesses = self.features.buf_accesses()\n    out_numel = V.graph.sizevars.size_hint(sympy_product(self.numels.values()))\n    for i, arg in enumerate(call_args):\n        if arg not in buf_accesses:\n            nbytes.append(0)\n            continue\n        arg_numel = V.graph.get_numel(arg)\n        buf_size = V.graph.sizevars.size_hint(arg_numel)\n        if buf_size > out_numel:\n            indices = OrderedSet[Any]()\n            no_index_dep_count = 0\n            for dep in buf_accesses[arg]:\n                if isinstance(dep, (StarDep, WeakDep)):\n                    indices.add(f'no_index_dep_{no_index_dep_count}')\n                    no_index_dep_count += 1\n                else:\n                    indices.add(dep.index)\n            numel = len(indices) * out_numel\n        else:\n            numel = buf_size\n        dtype = V.graph.get_dtype(arg)\n        dtype_size = get_dtype_size(dtype)\n        nbytes.append(numel * dtype_size * (1 + int(i < ninplace_args)))\n    return sum(nbytes)",
    "docstring": "Try the best to estimate the total size (in bytes) of the kernel's inputs and outputs, which is used for estimating the memory throughput of this kernel. This information is used for checking how far we are from the peak memory bandwidth. It's important that we want to avoid overestimating the sizes of the inputs and outputs, because it can wrongfully give us a very large memory traffic value, which may be even larger than the theoretical bandwidth and thus become very misleading. This is particularly problematic for cases where we slice some inputs. In those cases, we should only count the size of the \"slices\" instead of the original inputs, because only the slices contribute to the real memory traffic.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py",
    "ast_data": "FunctionDef name:estimate_kernel_num_bytes arg:self arguments arg Assign Assign Call Call Call Assign Call Assign Call Assign Call Call Call For Call If Compare Call Assign Call Assign Call If Compare Assign Call Assign For If Call Call Call Assign Call Assign Assign Call Assign Call Call Call Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "LockDraw",
    "source_code": "class LockDraw:\n\n    def __init__(self):\n        self._owner = None\n\n    def __call__(self, o):\n        if not self.available(o):\n            raise ValueError('already locked')\n        self._owner = o\n\n    def release(self, o):\n        if not self.available(o):\n            raise ValueError('you do not own this lock')\n        self._owner = None\n\n    def available(self, o):\n        return not self.locked() or self.isowner(o)\n\n    def isowner(self, o):\n        return self._owner is o\n\n    def locked(self):\n        return self._owner is not None",
    "docstring": "Some widgets, like the cursor, draw onto the canvas, and this is not desirable under all circumstances, like when the toolbar is in zoom-to-rect mode and drawing a rectangle. To avoid this, a widget can acquire a canvas' lock with `` before drawing on the canvas; this will prevent other widgets from doing so at the same time (if they also try to acquire the lock first).",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "ClassDef name:LockDraw FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:__call__ arg:self arg:o arguments arg arg If Call Raise Call Assign FunctionDef name:release arg:self arg:o arguments arg arg If Call Raise Call Assign FunctionDef name:available arg:self arg:o arguments arg arg Return return:yes BoolOp Call Call FunctionDef name:isowner arg:self arg:o arguments arg arg Return return:yes Compare FunctionDef name:locked arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_cmap",
    "source_code": "def get_cmap(self, cmap):\n    if cmap is None:\n        return self[mpl.rcParams['image.cmap']]\n    if isinstance(cmap, colors.Colormap):\n        return cmap\n    if isinstance(cmap, str):\n        _api.check_in_list(sorted(_colormaps), cmap=cmap)\n        return self[cmap]\n    raise TypeError('get_cmap expects None or an instance of a str or Colormap . ' + f'you passed {cmap!r} of type {type(cmap)}')",
    "docstring": "Return a color map specified through *cmap*. Parameters ---------- cmap : str or or None - if a , return it - if a string, look it up in `image.cmap` Returns ------- Colormap",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cm.py",
    "ast_data": "FunctionDef name:get_cmap arg:self arg:cmap arguments arg arg If Compare Return return:yes If Call Return return:yes If Call Call Call Return return:yes Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "to_parquet",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef to_parquet(df: DataFrame, path: FilePath | WriteBuffer[bytes] | None=None, engine: str='auto', compression: str | None='snappy', index: bool | None=None, storage_options: StorageOptions | None=None, partition_cols: list[str] | None=None, filesystem: Any=None, **kwargs) -> bytes | None:\n    if isinstance(partition_cols, str):\n        partition_cols = [partition_cols]\n    impl = get_engine(engine)\n    path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path\n    impl.write(df, path_or_buf, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, filesystem=filesystem, **kwargs)\n    if path is None:\n        assert isinstance(path_or_buf, io.BytesIO)\n        return path_or_buf.getvalue()\n    else:\n        return None",
    "docstring": "Write a DataFrame to the parquet format. Parameters ---------- df : DataFrame path : str, path object, file-like object, or None, default None String, path object (implementing ``. .. versionadded:: 2.1.0 kwargs Additional keyword arguments passed to the engine. Returns ------- bytes if no path argument is provided else None",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parquet.py",
    "ast_data": "FunctionDef name:to_parquet arg:df arg:path arg:engine arg:compression arg:index arg:storage_options arg:partition_cols arg:filesystem arguments arg arg arg arg arg arg arg arg arg If Call Assign Assign Call Compare Call Call If Compare Call Return return:yes Call Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "quantize_per_tensor_tensor",
    "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_tensor.tensor', 'CompositeExplicitAutograd')\ndef quantize_per_tensor_tensor(input: torch.Tensor, scale: torch.Tensor, zero_point: torch.Tensor, quant_min: int, quant_max: int, dtype: torch.dtype) -> torch.Tensor:\n    assert zero_point.numel() == 1, f'Expecting zero_point tensor to be one element, but received : {zero_point.numel()}'\n    assert scale.numel() == 1, f'Expecting scale tensor to be one element, but received : {scale.numel()}'\n    return quantize_per_tensor(input, scale.item(), zero_point.item(), quant_min, quant_max, dtype)",
    "docstring": "Affine quantization for the Tensor using the same quantization parameters to map from floating point to quantized values Same as but scale and zero_point are Scalar Tensor instead of scalar values",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:quantize_per_tensor_tensor arg:input arg:scale arg:zero_point arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg Compare Call Call Compare Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_flat_structure",
    "source_code": "@property\ndef _flat_structure(self):\n    return {'output_shapes': self._flat_shapes, 'output_types': self._flat_types}",
    "docstring": "Helper for setting and attrs of an op. Most dataset op constructors expect and arguments that represent the flattened structure of an element. This helper function generates these attrs as a keyword argument dictionary, allowing implementations to pass to the op constructor. Returns: A dictionary of keyword arguments that can be passed to a dataset op constructor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_flat_structure arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "AddForwardLoopCounter",
    "source_code": "def AddForwardLoopCounter(self, outer_grad_state):\n    n = constant_op.constant(0, name='f_count')\n    if outer_grad_state is not None:\n        outer_add_op = outer_grad_state.forward_index.op.inputs[0].op\n        n.op._add_control_input(outer_add_op)\n    self.Enter()\n    self.AddName(n.name)\n    enter_n = _Enter(n, self._name, is_constant=False, parallel_iterations=self._parallel_iterations, name='f_count')\n    self.loop_enters.append(enter_n)\n    merge_n = merge([enter_n, enter_n])[0]\n    switch_n = switch(merge_n, self._pivot)\n    index = math_ops.add(switch_n[1], 1)\n    next_n = _NextIteration(index)\n    merge_n.op._update_input(1, next_n)\n    total_iterations = exit(switch_n[0], name='f_count')\n    self.loop_exits.append(total_iterations)\n    self.ExitResult([total_iterations])\n    self.Exit()\n    return (total_iterations, next_n)",
    "docstring": "Adds a loop that counts the number of iterations. This is added to the forward loop at the time when we start to create the loop for backprop gradient computation. Called in the outer context of this forward context. The pseudocode is: Note that a control dependency is added to to ensure the correct execution order of stack push ops. Args: outer_grad_state: The outer grad state. None if not nested. Returns: The number of iterations taken by the forward loop and the loop index.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:AddForwardLoopCounter arg:self arg:outer_grad_state arguments arg arg Assign Call If Compare Assign Call Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ContentFile",
    "source_code": "class ContentFile(File):\n\n    def __init__(self, content, name=None):\n        stream_class = StringIO if isinstance(content, str) else BytesIO\n        super().__init__(stream_class(content), name=name)\n        self.size = len(content)\n\n    def __str__(self):\n        return 'Raw content'\n\n    def __bool__(self):\n        return True\n\n    def open(self, mode=None):\n        self.seek(0)\n        return self\n\n    def close(self):\n        pass\n\n    def write(self, data):\n        self.__dict__.pop('size', None)\n        return self.file.write(data)",
    "docstring": "A File-like object that takes just raw content, rather than an actual file.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\base.py",
    "ast_data": "ClassDef name:ContentFile FunctionDef name:__init__ arg:self arg:content arg:name arguments arg arg arg Assign Call Call Call Call Assign Call FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__bool__ arg:self arguments arg Return return:yes FunctionDef name:open arg:self arg:mode arguments arg arg Call Return return:yes FunctionDef name:close arg:self arguments arg FunctionDef name:write arg:self arg:data arguments arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "last_checkpoints",
    "source_code": "@property\ndef last_checkpoints(self):\n    return list((self._CheckpointFilename(p) for p in self._last_checkpoints))",
    "docstring": "List of not-yet-deleted checkpoint filenames. You can pass any of the returned values to . Returns: A list of checkpoint filenames, sorted from oldest to newest.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:last_checkpoints arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "convert",
    "source_code": "def convert(self) -> str:\n    return printing.pprint_thing(self.expr)",
    "docstring": "Convert an expression for evaluation. Defaults to return the expression as a string.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\engines.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None) -> tuple[Sequence[Any], Mapping[str, Any]]:\n    ordered_params = tuple((model.state_dict[name] for name in model.graph_signature.parameters))\n    non_persistent_buffers = set(model.graph_signature.non_persistent_buffers)\n    ordered_buffers = []\n    for name in model.graph_signature.buffers:\n        if name in non_persistent_buffers:\n            ordered_buffers.append(model.constants[name])\n        else:\n            ordered_buffers.append(model.state_dict[name])\n    ordered_constant_tensors = tuple((model.constants[fqn] for fqn in model.graph_signature.lifted_tensor_constants))\n    updated_args = (*ordered_params, *ordered_buffers, *ordered_constant_tensors, *model_args)\n    if model_kwargs:\n        return MergeKwargsIntoArgsInputStep().apply(updated_args, model_kwargs, model=model)\n    return (updated_args, {})",
    "docstring": "Convert complex tensors to float tensors. Args: model_args: The model args. model_kwargs: The model kwargs. model: The PyTorch model. Returns: A tuple of the model args and kwargs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arg:model_args arg:model_kwargs arg:model arguments arg arg arg arg Assign Call Assign Call Assign For If Compare Call Call Assign Call Assign If Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "rosen_hess_prod",
    "source_code": "@xp_capabilities(skip_backends=[('jax.numpy', \"JAX doesn't allow item assignment.\")])\ndef rosen_hess_prod(x, p):\n    xp = array_namespace(x, p)\n    x = xp_promote(x, force_floating=True, xp=xp)\n    x = xpx.atleast_nd(x, ndim=1, xp=xp)\n    p = xp.asarray(p, dtype=x.dtype)\n    Hp = xp.zeros(x.shape[0], dtype=x.dtype)\n    Hp[0] = (1200 * x[0] ** 2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]\n    Hp[1:-1] = -400 * x[:-2] * p[:-2] + (202 + 1200 * x[1:-1] ** 2 - 400 * x[2:]) * p[1:-1] - 400 * x[1:-1] * p[2:]\n    Hp[-1] = -400 * x[-2] * p[-2] + 200 * p[-1]\n    return Hp",
    "docstring": "Product of the Hessian matrix of the Rosenbrock function with a vector. Parameters ---------- x : array_like 1-D array of points at which the Hessian matrix is to be computed. p : array_like 1-D array, the vector to be multiplied by the Hessian matrix. Returns ------- rosen_hess_prod : ndarray The Hessian matrix of the Rosenbrock function at multiplied by the vector . See Also -------- rosen, rosen_der, rosen_hess Examples -------- >>> import numpy as np >>> from scipy.optimize import rosen_hess_prod >>> X = 0.1 * np.arange(9) >>> p = 0.5 * np.arange(9) >>> rosen_hess_prod(X, p) array([ -0., 27., -10., -95., -192., -265., -278., -195., -180.])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:rosen_hess_prod arg:x arg:p arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "time_pdist",
    "source_code": "def time_pdist(self, num_points, metric):\n    distance.pdist(self.points, self.metric, w=self.weights, **self.kwargs)",
    "docstring": "Time scipy.spatial.distance.pdist for weighted distance metrics.",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_pdist arg:self arg:num_points arg:metric arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_unsafe_preserve_version_counter",
    "source_code": "class _unsafe_preserve_version_counter(_DecoratorContextManager):\n\n    def __init__(self, tensors: Union[torch.Tensor, tuple[torch.Tensor, ...]]) -> None:\n        self.tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tensors\n        assert isinstance(self.tensors, tuple)\n        self.prev_versions = tuple((t._version for t in self.tensors))\n\n    def __enter__(self) -> None:\n        pass\n\n    def __exit__(self, *args) -> None:\n        torch._C._autograd._unsafe_set_version_counter(self.tensors, self.prev_versions)",
    "docstring": "DO NOT USE THIS UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING. This context manager can lead to arbitrary silent-correctness issues in any other part of your code (even the ones not touched directly by the context manager)! Ordinarily, autograd will track mutations to tensors by incrementing it's attribute. This is generally important for correctness, as for example, mutating a tensor that autograd has saved for the backwards pass can result in incorrect gradients, and autograd uses the version counter to detect and error out in this situation. However, there are rare instances where it might be useful to hide mutations from autograd. For example: if a tensor is very large, and you'd like to free its memory by storing it elsewhere, and re-populate the tensor right before it is needed by autograd. Args: tensor (torch.Tensor): the tensor in question, that you would like to preserve the version counter of. .. note:: This API does not apply to :ref:.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\grad_mode.py",
    "ast_data": "ClassDef name:_unsafe_preserve_version_counter FunctionDef name:__init__ arg:self arg:tensors arguments arg arg Assign Call Call Assign Call FunctionDef name:__enter__ arg:self arguments arg FunctionDef name:__exit__ arg:self arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "LintSeverity",
    "source_code": "class LintSeverity(str, enum.Enum):\n    ERROR = 'error'\n    WARNING = 'warning'\n    ADVICE = 'advice'\n    DISABLED = 'disabled'",
    "docstring": "Severity of a lint message.",
    "type": "class",
    "file_path": "pytorch\\tools\\linter\\adapters\\ruff_linter.py",
    "ast_data": "ClassDef name:LintSeverity Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "build_chunks",
    "source_code": "def build_chunks(self) -> int:\n    if self.size_check(self.proto_size):\n        new_proto = type(self._proto)()\n        new_proto.MergeFrom(self._proto)\n        self._proto.Clear()\n        self.add_chunk(new_proto, [])\n        return self.proto_size\n    return 0",
    "docstring": "Creates a chunk for the entire proto and returns the original size.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "FunctionDef name:build_chunks arg:self arguments arg If Call Assign Call Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "is_valid_backend",
    "source_code": "def is_valid_backend(self, backend):\n    if not backend.startswith('module://'):\n        backend = backend.lower()\n    backwards_compat = {'module://ipympl.backend_nbagg': 'widget', 'module://matplotlib_inline.backend_inline': 'inline'}\n    backend = backwards_compat.get(backend, backend)\n    if backend in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK or backend in self._backend_to_gui_framework:\n        return True\n    if backend.startswith('module://'):\n        self._backend_to_gui_framework[backend] = 'unknown'\n        return True\n    self._ensure_entry_points_loaded()\n    if backend in self._backend_to_gui_framework:\n        return True\n    return False",
    "docstring": "Return True if the backend name is valid, False otherwise. A backend name is valid if it is one of the built-in backends or has been dynamically added via an entry point. Those beginning with `` are always considered valid and are added to the current list of all backends within this function. Even if a name is valid, it may not be importable or usable. This can only be determined by loading and using the backend module. Parameters ---------- backend : str Name of backend. Returns ------- bool True if backend is valid, False otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "FunctionDef name:is_valid_backend arg:self arg:backend arguments arg arg If Call Assign Call Assign Assign Call If BoolOp Compare Compare Return return:yes If Call Assign Return return:yes Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "EggHolder",
    "source_code": "class EggHolder(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-512.1] * self.N, [512.0] * self.N))\n        self.global_optimum = [[512.0, 404.2319]]\n        self.fglob = -959.640662711\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        vec = -(x[1:] + 47) * sin(sqrt(abs(x[1:] + x[:-1] / 2.0 + 47))) - x[:-1] * sin(sqrt(abs(x[:-1] - (x[1:] + 47))))\n        return sum(vec)",
    "docstring": "Egg Holder [1]_ objective function. This class defines the Egg Holder global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{EggHolder}}=\\sum_{1}^{n - 1}\\left[-\\left(x_{i + 1} + 47 \\right ) \\sin\\sqrt{\\lvert x_{i+1} + x_i/2 + 47 \\rvert} - x_i \\sin\\sqrt{\\lvert x_i - (x_{i + 1} + 47)\\rvert}\\right ] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Jamil is missing a minus sign on the fglob value",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py",
    "ast_data": "ClassDef name:EggHolder Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "disconnect",
    "source_code": "def disconnect(self, cid):\n    self._observers.disconnect(cid)",
    "docstring": "Remove the observer with connection id *cid*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:cid arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_SessionWithFeedDictAdditions",
    "source_code": "class _SessionWithFeedDictAdditions(session_lib.SessionInterface):\n\n    def __init__(self, session, feed_additions):\n        self._wrapped_session = session\n        self._feed_additions = feed_additions\n\n    def run(self, fetches, feed_dict=None, **kwargs):\n        if feed_dict is None:\n            feed_dict = {}\n        else:\n            feed_dict = feed_dict.copy()\n        feed_dict.update(self._feed_additions)\n        return self._wrapped_session.run(fetches=fetches, feed_dict=feed_dict, **kwargs)",
    "docstring": "Pretends to be a session, inserts extra feeds on run().",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "ClassDef name:_SessionWithFeedDictAdditions FunctionDef name:__init__ arg:self arg:session arg:feed_additions arguments arg arg arg Assign Assign FunctionDef name:run arg:self arg:fetches arg:feed_dict arguments arg arg arg arg If Compare Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "PSUnavailableError",
    "source_code": "class PSUnavailableError(errors.UnavailableError):\n\n    def __init__(self, original_exception):\n        assert isinstance(original_exception, errors.UnavailableError)\n        self.original_exception = original_exception\n        super().__init__(original_exception.node_def, original_exception.op, original_exception.message)",
    "docstring": "Specifies that a parameter server is the unavailable task.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "ClassDef name:PSUnavailableError FunctionDef name:__init__ arg:self arg:original_exception arguments arg arg Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "MetaProxy",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass MetaProxy(Proxy):\n\n    def __init__(self, node: Node, tracer: 'Optional[TracerBase]'=None, fake_mode=None):\n        super().__init__(node, tracer)\n        self.fake_mode = fake_mode\n\n    def __repr__(self) -> str:\n        return f'MetaProxy({self.node.name})'\n\n    @classmethod\n    def __torch_function__(cls, orig_method, types, args=None, kwargs=None):\n        args = args if args else ()\n        kwargs = kwargs if kwargs else {}\n        meta_proxy = None\n        for arg in args:\n            if isinstance(arg, MetaProxy):\n                meta_proxy = arg\n                break\n        assert meta_proxy is not None, 'No MetaProxy found in arguments, but one is expected.'\n        proxy = super().__torch_function__(orig_method, types, args, kwargs)\n        with meta_proxy.fake_mode:\n            proxy.node.meta['val'] = orig_method(*[a.node.meta['val'] if isinstance(a, Proxy) else a for a in args], **kwargs)\n        return MetaProxy(proxy.node, proxy.tracer, meta_proxy.fake_mode)",
    "docstring": "A Proxy subclass that propagates metadata (meta['val']) during graph tracing.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "ClassDef name:MetaProxy FunctionDef name:__init__ arg:self arg:node arg:tracer arg:fake_mode arguments arg arg arg arg Call Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__torch_function__ arg:cls arg:orig_method arg:types arg:args arg:kwargs arguments arg arg arg arg arg Assign Assign Assign For If Call Assign Compare Assign Call Call With Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "load_library",
    "source_code": "def load_library(self, path):\n    if torch._running_with_deploy():\n        return\n    path = _utils_internal.resolve_library_path(path)\n    with dl_open_guard():\n        ctypes.CDLL(path)\n    self.loaded_libraries.add(path)",
    "docstring": "Loads a shared library from the given path into the current process. The library being loaded may run global initialization code to register custom operators with the PyTorch JIT runtime. This allows dynamically loading custom operators. For this, you should compile your operator and the static registration code into a shared library object, and then call `` attribute, a set that may be inspected for the paths of all libraries loaded using this function. Args: path (str): A path to a shared library to load.",
    "type": "method",
    "file_path": "pytorch\\torch\\_ops.py",
    "ast_data": "FunctionDef name:load_library arg:self arg:path arguments arg arg If Call Return return:no Assign Call With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_current_status",
    "source_code": "def get_current_status(self) -> Any:\n    for status in self.filtered_statuses:\n        if status['conclusion'] != PENDING:\n            return status\n    return None",
    "docstring": "When getting the current status, we want the latest status which is not pending, be it success or failure",
    "type": "method",
    "file_path": "pytorch\\tools\\alerts\\create_alerts.py",
    "ast_data": "FunctionDef name:get_current_status arg:self arguments arg For If Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "DeallocFromPoolLine",
    "source_code": "@dataclasses.dataclass\nclass DeallocFromPoolLine(PoolMemoryPlanningLine):\n    is_last_pool_usage: bool = False\n\n    def codegen(self, code: IndentedBuffer):\n        if self.is_last_pool_usage:\n            assert self.group.allocation and self.group.allocation.pool\n            self.group.allocation.pool.codegen_destroy(self.wrapper, code)",
    "docstring": "Similar to FreeIfNotReusedLine, but takes memory from a pool",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "ClassDef name:DeallocFromPoolLine FunctionDef name:codegen arg:self arg:code arguments arg arg If BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "_show_inputs_outputs_mgd",
    "source_code": "def _show_inputs_outputs_mgd(meta_graph_def, signature_def_key, indent):\n    inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key)\n    outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key)\n    indent_str = '  ' * indent\n\n    def in_print(s):\n        print(indent_str + s)\n    in_print('The given SavedModel SignatureDef contains the following input(s):')\n    for input_key, input_tensor in sorted(inputs_tensor_info.items()):\n        in_print(\"  inputs['%s'] tensor_info:\" % input_key)\n        _print_tensor_info(input_tensor, indent + 1)\n    in_print('The given SavedModel SignatureDef contains the following output(s):')\n    for output_key, output_tensor in sorted(outputs_tensor_info.items()):\n        in_print(\"  outputs['%s'] tensor_info:\" % output_key)\n        _print_tensor_info(output_tensor, indent + 1)\n    in_print('Method name is: %s' % meta_graph_def.signature_def[signature_def_key].method_name)",
    "docstring": "Prints input and output TensorInfos. Prints the details of input and output TensorInfos for the SignatureDef mapped by the given signature_def_key. Args: meta_graph_def: MetaGraphDef to inspect. signature_def_key: A SignatureDef key string. indent: How far (in increments of 2 spaces) to indent each line of output.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_show_inputs_outputs_mgd arg:meta_graph_def arg:signature_def_key arg:indent arguments arg arg arg Assign Call Assign Call Assign FunctionDef name:in_print arg:s arguments arg Call Call For Call Call Call Call Call For Call Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "get_default_fcompiler",
    "source_code": "def get_default_fcompiler(osname=None, platform=None, requiref90=False, c_compiler=None):\n    matching_compiler_types = available_fcompilers_for_platform(osname, platform)\n    log.info(\"get_default_fcompiler: matching types: '%s'\", matching_compiler_types)\n    compiler_type = _find_existing_fcompiler(matching_compiler_types, osname=osname, platform=platform, requiref90=requiref90, c_compiler=c_compiler)\n    return compiler_type",
    "docstring": "Determine the default Fortran compiler to use for the given platform.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py",
    "ast_data": "FunctionDef name:get_default_fcompiler arg:osname arg:platform arg:requiref90 arg:c_compiler arguments arg arg arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_default_local_save_plan",
    "source_code": "def create_default_local_save_plan(state_dict: dict[str, Any], is_coordinator: bool) -> SavePlan:\n    requests = []\n    for fqn, obj in state_dict.items():\n        if isinstance(obj, DTensor):\n            if obj.device_mesh.get_coordinate() is not None:\n                requests += _create_write_items(fqn, obj)\n        else:\n            requests += _create_write_items(fqn, obj)\n    return SavePlan(requests)",
    "docstring": "Create the `` used by DefaultSavePlanner. On non-coordinator ranks, this function ignores tensors and non-tensor objects, only producing writes for ShardedTensor objects. On the coordinator rank, produce writes for all values.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:create_default_local_save_plan arg:state_dict arg:is_coordinator arguments arg arg Assign For Call If Call If Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "load_whitening_model",
    "source_code": "def load_whitening_model(kernel_type: str, training_set: str) -> Dict[str, Any]:\n    whitening_models = torch.hub.load_state_dict_from_url(urls[kernel_type], map_location=torch.device('cpu'))\n    whitening_model = whitening_models[training_set]\n    return whitening_model",
    "docstring": "Load whitening model.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\mkd.py",
    "ast_data": "FunctionDef name:load_whitening_model arg:kernel_type arg:training_set arguments arg arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "home",
    "source_code": "def home(self):\n    return self.push(self._elements[0]) if self._elements else None",
    "docstring": "Push the first element onto the top of the stack. The first element is returned.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:home arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "get_sprite",
    "source_code": "def get_sprite(self, idx):\n    return self._spritelist[idx]",
    "docstring": "return the sprite at the index idx from the groups sprites LayeredUpdates.get_sprite(idx): return sprite Raises IndexOutOfBounds if the idx is not within range.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:get_sprite arg:self arg:idx arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_setup",
    "source_code": "def _get_setup(benchmark: GroupedBenchmark, runtime: RuntimeMode, language: Language, stmt: str, model_path: Optional[str]) -> str:\n    if language == Language.PYTHON:\n        setup = benchmark.setup.py_setup\n        model_setup = benchmark.py_model_setup\n    else:\n        assert language == Language.CPP\n        setup = benchmark.setup.cpp_setup\n        model_setup = benchmark.cpp_model_setup\n    if runtime == RuntimeMode.EAGER:\n        return '\\n'.join([setup, model_setup or ''])\n    assert runtime == RuntimeMode.JIT\n    assert model_path is not None\n    assert '\"' not in model_path\n    if language == Language.PYTHON:\n        setup_template: str = textwrap.dedent(f'\\n            jit_model = torch.jit.load(\"{model_path}\")\\n\\n            # Warmup `jit_model`\\n            for _ in range(3):\\n            {{stmt}}\\n        ')\n    else:\n        assert language == Language.CPP\n        setup_template = textwrap.dedent(f'\\n            const std::string fpath = \"{model_path}\";\\n            auto jit_model = torch::jit::load(fpath);\\n\\n            // Warmup `jit_model`\\n            for (int i = 0; i < 3; i++) {{{{\\n            {{stmt}}\\n            }}}}\\n        ')\n    model_load = setup_template.format(stmt=textwrap.indent(stmt, ' ' * 4))\n    return '\\n'.join([setup, model_load])",
    "docstring": "Specialize a GroupedBenchmark for a particular configuration. Setup requires two extra pieces of information: 1) The benchmark stmt. This is needed to warm up the model and avoid measuring lazy initialization. 2) The model path so we can load it during the benchmark. These are only used when .",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\expand.py",
    "ast_data": "FunctionDef name:_get_setup arg:benchmark arg:runtime arg:language arg:stmt arg:model_path arguments arg arg arg arg arg If Compare Assign Assign Compare Assign Assign If Compare Return return:yes Call BoolOp Compare Compare Compare If Compare Call Compare Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "logout",
    "source_code": "def logout(self, request, extra_context=None):\n    from django.contrib.auth.views import LogoutView\n    defaults = {'extra_context': {**self.each_context(request), 'has_permission': False, **(extra_context or {})}}\n    if self.logout_template is not None:\n        defaults['template_name'] = self.logout_template\n    request.current_app = self.name\n    return LogoutView.as_view(**defaults)(request)",
    "docstring": "Log out the user for the given HttpRequest. This should *not* assume the user is already logged in.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:logout arg:self arg:request arg:extra_context arguments arg arg arg Assign Call BoolOp If Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "trim_joins",
    "source_code": "def trim_joins(self, targets, joins, path):\n    joins = joins[:]\n    for pos, info in enumerate(reversed(path)):\n        if len(joins) == 1 or not info.direct:\n            break\n        if info.filtered_relation:\n            break\n        join_targets = {t.column for t in info.join_field.foreign_related_fields}\n        cur_targets = {t.column for t in targets}\n        if not cur_targets.issubset(join_targets):\n            break\n        targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}\n        targets = tuple((targets_dict[t.column] for t in targets))\n        self.unref_alias(joins.pop())\n    return (targets, joins[-1], joins)",
    "docstring": "The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:trim_joins arg:self arg:targets arg:joins arg:path arguments arg arg arg arg Assign For Call Call If BoolOp Compare Call If Assign Assign If Call Assign Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, f: _T) -> _T:\n    gradient_registry.register(f, self._op_type)\n    return f",
    "docstring": "Registers the function as gradient function for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:f arguments arg arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_multi_take",
    "source_code": "def _multi_take(self, tup: tuple):\n    d = {axis: self._get_listlike_indexer(key, axis) for key, axis in zip(tup, self.obj._AXIS_ORDERS)}\n    return self.obj._reindex_with_indexers(d, allow_dups=True)",
    "docstring": "Create the indexers for the passed tuple of keys, and executes the take operation. This allows the take operation to be executed all at once, rather than once for each dimension. Improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis. Returns ------- values: same type as the object being indexed",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_multi_take arg:self arg:tup arguments arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_link_annotation",
    "source_code": "def _get_link_annotation(gc, x, y, width, height, angle=0):\n    quadpoints, rect = _get_coordinates_of_block(x, y, width, height, angle)\n    link_annotation = {'Type': Name('Annot'), 'Subtype': Name('Link'), 'Rect': rect, 'Border': [0, 0, 0], 'A': {'S': Name('URI'), 'URI': gc.get_url()}}\n    if angle % 90:\n        link_annotation['QuadPoints'] = quadpoints\n    return link_annotation",
    "docstring": "Create a link annotation object for embedding URLs.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_get_link_annotation arg:gc arg:x arg:y arg:width arg:height arg:angle arguments arg arg arg arg arg arg Assign Call Assign Call Call Call Call If Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_preprocess_comments",
    "source_code": "def _preprocess_comments(iterable, comments, encoding):\n    for line in iterable:\n        if isinstance(line, bytes):\n            line = line.decode(encoding)\n        for c in comments:\n            line = line.split(c, 1)[0]\n        yield line",
    "docstring": "Generator that consumes a line iterated iterable and strips out the multiple (or multi-character) comments from lines. This is a pre-processing step to achieve feature parity with loadtxt (we assume that this feature is a nieche feature).",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:_preprocess_comments arg:iterable arg:comments arg:encoding arguments arg arg arg For If Call Assign Call For Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_construct_wrap_fn",
    "source_code": "def _construct_wrap_fn(root_module: nn.Module, target_module_to_kwargs: dict[nn.Module, dict[str, Any]], fsdp_fn: Callable) -> Callable[[nn.Module], Optional[nn.Module]]:\n\n    def fn(module: nn.Module) -> Optional[nn.Module]:\n        if module in target_module_to_kwargs and module is not root_module:\n            kwargs = target_module_to_kwargs[module]\n            return fsdp_fn(module, **kwargs)\n        return None\n    return fn",
    "docstring": "This constructs the \"wrap\" function to pass to :func: based on ``, which should be constructed from the wrapping policy.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py",
    "ast_data": "FunctionDef name:_construct_wrap_fn arg:root_module arg:target_module_to_kwargs arg:fsdp_fn arguments arg arg arg FunctionDef name:fn arg:module arguments arg If BoolOp Compare Compare Assign Return return:yes Call Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "finish_plan",
    "source_code": "@abc.abstractmethod\ndef finish_plan(self, new_plan: SavePlan) -> SavePlan:\n    pass",
    "docstring": "Merge the plan created by and the result of . This is called on all ranks.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:finish_plan arg:self arg:new_plan arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> Iterator:\n    return iter(self._info_axis)",
    "docstring": "Iterate over info axis. Returns ------- iterator Info axis as iterator. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]}) >>> for x in df: ... print(x) A B",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "cpu_baseline_flags",
    "source_code": "def cpu_baseline_flags(self):\n    return self.parse_baseline_flags",
    "docstring": "Returns a list of final CPU baseline compiler flags",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:cpu_baseline_flags arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_numpy",
    "source_code": "def _to_numpy(a):\n    if isinstance(a, ops.EagerTensor):\n        return a.numpy()\n    if isinstance(a, tensor.Tensor):\n        sess = ops.get_default_session()\n        return sess.run(a)\n    if isinstance(a, indexed_slices.IndexedSlicesValue):\n        arr = np.zeros(a.dense_shape)\n        assert len(a.values) == len(a.indices), 'IndexedSlicesValue has %s value slices but %s indices\\n%s' % (a.values, a.indices, a)\n        for values_slice, index in zip(a.values, a.indices):\n            assert 0 <= index < len(arr), 'IndexedSlicesValue has invalid index %s\\n%s' % (index, a)\n            arr[index] += values_slice\n        return arr\n    return a",
    "docstring": "Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays. Args: a: any value. Returns: If a is EagerTensor or Tensor, returns the evaluation of a by calling numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding dense numpy array. Otherwise returns a unchanged.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\gradient_checker_v2.py",
    "ast_data": "FunctionDef name:_to_numpy arg:a arguments arg If Call Return return:yes Call If Call Assign Call Return return:yes Call If Call Assign Call Compare Call Call For Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_proxy",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef create_proxy(self, kind: str, target: Target, args: tuple[Any, ...], kwargs: dict[str, Any], name: Optional[str]=None, type_expr: Optional[Any]=None, proxy_factory_fn: Callable[[Node], 'Proxy']=None):\n    args_ = self.create_arg(args)\n    kwargs_ = self.create_arg(kwargs)\n    assert isinstance(args_, tuple)\n    assert isinstance(kwargs_, dict)\n    node = self.create_node(kind, target, args_, kwargs_, name, type_expr)\n    if not proxy_factory_fn:\n        proxy = self.proxy(node)\n    else:\n        proxy = proxy_factory_fn(node)\n    if self.record_stack_traces and (not proxy.node.stack_trace):\n        from torch.fx.experimental.symbolic_shapes import uninteresting_files\n        user_frame_summary = CapturedTraceback.extract().summary()\n        if user_frame_summary:\n            first_forward = -1\n            for i, frame in enumerate(user_frame_summary):\n                if frame.name == 'forward':\n                    user_frame_summary = user_frame_summary[i:]\n                    first_forward = i\n                    break\n            if first_forward == -1:\n                user_frame_summary = []\n            stack_trace = [frame for frame in user_frame_summary if frame.filename not in uninteresting_files()]\n            stack_trace = traceback.StackSummary.from_list(stack_trace)\n            proxy.node.stack_trace = ''.join(stack_trace.format()).strip()\n    return proxy",
    "docstring": "Create a Node from the given arguments, then return the Node wrapped in a Proxy object. If kind = 'placeholder', then we're creating a Node that represents the parameter of a function. If we need to encode a default parameter, we use the `` Nodes.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "FunctionDef name:create_proxy arg:self arg:kind arg:target arg:args arg:kwargs arg:name arg:type_expr arg:proxy_factory_fn arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Call Call Assign Call If Assign Call Assign Call If BoolOp Assign Call Call If Assign For Call If Compare Assign Assign If Compare Assign Assign Compare Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "update_variables",
    "source_code": "def update_variables(self):\n    self.solver.updateVariables()",
    "docstring": "Update the variables for the solver attached to this layoutgrid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:update_variables arg:self arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y=None, sample_weight=None):\n    check_is_fitted(self)\n    X = self._check_test_data(X)\n    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)\n    _, scores = _labels_inertia_threadpool_limit(X, sample_weight, self.cluster_centers_, self._n_threads)\n    return -scores",
    "docstring": "Opposite of the value of X on the K-means objective. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data. y : Ignored Not used, present here for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None The weights for each observation in X. If None, all observations are assigned equal weight. Returns ------- score : float Opposite of the value of X on the K-means objective.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_storage",
    "source_code": "def load_storage(self, storage: Storage, offset: int=0) -> None:\n    assert self.handle is not None, 'Cannot load data from a file that is not registered.'\n    torch._C._gds_load_storage(self.handle, storage, offset)",
    "docstring": "Loads data from the file into the storage. This is a wrapper around `` into the storage. Args: storage (Storage): Storage to load data into. offset (int, optional): Offset into the file to start loading from. (Default: 0)",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\gds.py",
    "ast_data": "FunctionDef name:load_storage arg:self arg:storage arg:offset arguments arg arg arg Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "from_sparse_tensor_slices",
    "source_code": "@staticmethod\n@deprecation.deprecated(None, 'Use `tf.data.Dataset.from_tensor_slices()`.')\ndef from_sparse_tensor_slices(sparse_tensor):\n    from tensorflow.python.data.ops import from_sparse_tensor_slices_op\n    return from_sparse_tensor_slices_op._from_sparse_tensor_slices(sparse_tensor)",
    "docstring": "Splits each rank-N in this dataset row-wise. Args: sparse_tensor: A . Returns: Dataset: A of rank-(N-1) sparse tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:from_sparse_tensor_slices arg:sparse_tensor arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sv, sess, step_counter=None):\n    super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)\n    self._sv = sv\n    self._sess = sess\n    self._last_time = 0.0\n    self._last_step = 0\n    step_counter = sv.global_step if step_counter is None else step_counter\n    self._step_counter = step_counter\n    self._summary_tag = '%s/sec' % self._step_counter.op.name",
    "docstring": "Create a . Args: sv: A . sess: A . step_counter: A holding the step counter. By defaults, it uses sv.global_step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sv arg:sess arg:step_counter arguments arg arg arg arg Call Call Assign Assign Assign Assign Assign Compare Assign Assign"
  },
  {
    "library": "pygame",
    "name": "get_bottom_layer",
    "source_code": "def get_bottom_layer(self):\n    return self._spritelayers[self._spritelist[0]]",
    "docstring": "return the bottom layer LayeredUpdates.get_bottom_layer(): return layer",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:get_bottom_layer arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "SphinxFileInput",
    "source_code": "class SphinxFileInput(FileInput):\n\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        kwargs['error_handler'] = 'sphinx'\n        super().__init__(*args, **kwargs)",
    "docstring": "A basic FileInput for Sphinx.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "ClassDef name:SphinxFileInput FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_",
    "source_code": "@zeros_and_scatter.register_vmap\ndef _(info, indims, shape, indices, value):\n    indices_indims = indims[1]\n    expanded_indices = []\n    for idx, idx_indim in zip(indices, indices_indims):\n        if idx_indim is None:\n            expanded_indices.append(idx.expand(value.shape))\n        else:\n            assert idx.shape == value.shape\n            expanded_indices.append(idx)\n    out = torch.ops.flex_lib.zeros_and_scatter(shape, expanded_indices, value)\n    return (out, None)",
    "docstring": "The batching rule is special in that it returns a tensor that is not batched",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\_trace_wrapped_higher_order_op.py",
    "ast_data": "FunctionDef name:_ arg:info arg:indims arg:shape arg:indices arg:value arguments arg arg arg arg arg Assign Assign For Call If Compare Call Call Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_submodule_paths",
    "source_code": "def get_submodule_paths():\n    root_directory = os.path.dirname(os.path.dirname(__file__))\n    gitmodule_file = os.path.join(root_directory, '.gitmodules')\n    with open(gitmodule_file) as gitmodules:\n        data = gitmodules.read().split('\\n')\n        submodule_paths = [datum.split(' = ')[1] for datum in data if datum.startswith('\\tpath = ')]\n        submodule_paths = [os.path.join(root_directory, path) for path in submodule_paths]\n    submodule_paths.append(os.path.join(root_directory, 'scipy/_lib/pyprima'))\n    return submodule_paths",
    "docstring": "Get paths to submodules so that we can exclude them from things like check_test_name.py, check_unicode.py, etc.",
    "type": "function",
    "file_path": "scipy\\tools\\get_submodule_paths.py",
    "ast_data": "FunctionDef name:get_submodule_paths arguments Assign Call Call Assign Call With Call Assign Call Call Assign Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_parameterlist",
    "source_code": "class desc_parameterlist(nodes.Part, nodes.Inline, nodes.FixedTextElement):\n    child_text_separator = ', '\n\n    def astext(self) -> str:\n        return f'({super().astext()})'",
    "docstring": "Node for a general parameter list. As default the parameter list is written in line with the rest of the signature. Set `` is True.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_parameterlist Assign FunctionDef name:astext arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "date_extract_sql",
    "source_code": "def date_extract_sql(self, lookup_type, sql, params):\n    raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')",
    "docstring": "Given a lookup_type of 'year', 'month', or 'day', return the SQL that extracts a value from the given date field field_name.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:date_extract_sql arg:self arg:lookup_type arg:sql arg:params arguments arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "aliased_name",
    "source_code": "def aliased_name(self, s):\n    aliases = ''.join((' or %s' % x for x in sorted(self.aliasd.get(s, []))))\n    return s + aliases",
    "docstring": "Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME'. For example, for the line markerfacecolor property, which has an alias, return 'markerfacecolor or mfc' and for the transform property, which does not, return 'transform'.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:aliased_name arg:self arg:s arguments arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "list_variables",
    "source_code": "@tf_export('train.list_variables')\ndef list_variables(ckpt_dir_or_file):\n    reader = load_checkpoint(ckpt_dir_or_file)\n    variable_map = reader.get_variable_to_shape_map()\n    names = sorted(variable_map.keys())\n    result = []\n    for name in names:\n        result.append((name, variable_map[name]))\n    return result",
    "docstring": "Lists the checkpoint keys and shapes of variables in a checkpoint. Checkpoint keys are paths in a checkpoint graph. Example usage: Args: ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint. Returns: List of tuples .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py",
    "ast_data": "FunctionDef name:list_variables arg:ckpt_dir_or_file arguments arg Assign Call Assign Call Assign Call Call Assign For Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "stage",
    "source_code": "def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:\n    if not self.cache_staged_state_dict:\n        staged_state_dict = _create_cpu_state_dict(state_dict)\n        _copy_state_dict(state_dict, staged_state_dict, type_check=self.type_check)\n        return staged_state_dict\n    if self.state_dict_cache is None:\n        self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)\n    return _copy_state_dict(state_dict, self.state_dict_cache)",
    "docstring": "Returns a copy of on the CPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py",
    "ast_data": "FunctionDef name:stage arg:self arg:state_dict arguments arg arg If Assign Call Call Return return:yes If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_acr",
    "source_code": "def get_acr(self, user) -> Optional[str]:\n    return None",
    "docstring": "Authentication Context Class Reference. Returns a user-defined case sensitive string indicating the class of authentication the used performed. Token audience may refuse to give access to some resources if some ACR criteria are not met. :ref: defines one special value: `ISO29115`_ level 1, and will be refused monetary operations. Developers MAY re-implement this method:: def get_acr(self, user): if user.insecure_session(): return \"0\" return \"urn:mace:incommon:iap:silver\" .. _ISO29115:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py",
    "ast_data": "FunctionDef name:get_acr arg:self arg:user arguments arg arg Return return:no"
  },
  {
    "library": "pandas",
    "name": "write",
    "source_code": "def write(self, obj, data_columns=None, **kwargs) -> None:\n    if not isinstance(obj, DataFrame):\n        name = obj.name or 'values'\n        obj = obj.to_frame(name)\n    super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)",
    "docstring": "we are going to write this as a frame table",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:write arg:self arg:obj arg:data_columns arguments arg arg arg arg If Call Assign BoolOp Assign Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "parse_definition",
    "source_code": "def parse_definition(self, typ: str) -> None:\n    name = self.fetch_token()\n    self.context.append(name.value)\n    funcname = '.'.join(self.context)\n    if self.decorator:\n        start_pos = self.decorator.start[0]\n        self.decorator = None\n    else:\n        start_pos = name.start[0]\n    self.fetch_until([OP, ':'])\n    if self.fetch_token().match(COMMENT, NEWLINE):\n        self.fetch_until(INDENT)\n        self.indents.append((typ, funcname, start_pos))\n    else:\n        self.add_definition(funcname, (typ, start_pos, name.end[0]))\n        self.context.pop()",
    "docstring": "Parse AST of definition.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:parse_definition arg:self arg:typ arguments arg arg Assign Call Call Assign Call If Assign Assign Assign Call If Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "generate_bounding_box_proposals",
    "source_code": "@tf_export('image.generate_bounding_box_proposals')\n@dispatch.add_dispatch_support\ndef generate_bounding_box_proposals(scores, bbox_deltas, image_info, anchors, nms_threshold=0.7, pre_nms_topn=6000, min_size=16, post_nms_topn=300, name=None):\n    return gen_image_ops.generate_bounding_box_proposals(scores=scores, bbox_deltas=bbox_deltas, image_info=image_info, anchors=anchors, nms_threshold=nms_threshold, pre_nms_topn=pre_nms_topn, min_size=min_size, post_nms_topn=post_nms_topn, name=name)",
    "docstring": "Generate bounding box proposals from encoded bounding boxes. Args: scores: A 4-D float of shape containing scores of the boxes for given anchors, can be unsorted. bbox_deltas: A 4-D float of shape encoding boxes with respect to each anchor. Coordinates are given in the form . image_info: A 2-D float of shape containing image information Height, Width, Scale. anchors: A 2-D float of shape describing the anchor boxes. Boxes are formatted in the form . nms_threshold: A scalar float for non-maximal-suppression threshold. Defaults to 0.7. pre_nms_topn: A scalar int for the number of top scoring boxes to be used as input. Defaults to 6000. min_size: A scalar float . Any box that has a smaller size than min_size will be discarded. Defaults to 16. post_nms_topn: An integer. Maximum number of rois in the output. name: A name for this operation (optional). Returns: rois: Region of interest boxes sorted by their scores. roi_probabilities: scores of the ROI boxes in the ROIs' .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:generate_bounding_box_proposals arg:scores arg:bbox_deltas arg:image_info arg:anchors arg:nms_threshold arg:pre_nms_topn arg:min_size arg:post_nms_topn arg:name arguments arg arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "UnsupportedResponseTypeError",
    "source_code": "class UnsupportedResponseTypeError(OAuth2Error):\n    error = 'unsupported_response_type'\n\n    def __init__(self, response_type, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.response_type = response_type\n\n    def get_error_description(self):\n        return f'response_type={self.response_type} is not supported'",
    "docstring": "The authorization server does not support obtaining an access token using this method.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\errors.py",
    "ast_data": "ClassDef name:UnsupportedResponseTypeError Assign FunctionDef name:__init__ arg:self arg:response_type arguments arg arg arg arg Call Call Assign FunctionDef name:get_error_description arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FakeQuantWithMinMaxVarsGradient",
    "source_code": "@ops.RegisterGradient('FakeQuantWithMinMaxVars')\ndef _FakeQuantWithMinMaxVarsGradient(op: ops.Operation, grad):\n    return fake_quant_with_min_max_vars_gradient(grad, op.inputs[0], op.inputs[1], op.inputs[2], num_bits=op.get_attr('num_bits'), narrow_range=op.get_attr('narrow_range'))",
    "docstring": "Gradient for FakeQuantWithMinMaxVars op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_FakeQuantWithMinMaxVarsGradient arg:op arg:grad arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_where",
    "source_code": "def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self:\n    value = self._validate_setitem_value(value)\n    res_values = np.where(mask, self._ndarray, value)\n    if res_values.dtype != self._ndarray.dtype:\n        raise AssertionError('Something has gone wrong, please report a bug at github.com/pandas-dev/pandas/')\n    return self._from_backing_data(res_values)",
    "docstring": "Analogue to np.where(mask, self, value) Parameters ---------- mask : np.ndarray[bool] value : scalar or listlike Raises ------ TypeError If value cannot be cast to self.dtype.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py",
    "ast_data": "FunctionDef name:_where arg:self arg:mask arg:value arguments arg arg arg Assign Call Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_real_batch_size",
    "source_code": "def get_real_batch_size(self, dataset_batch):\n    if isinstance(dataset_batch, (tuple, list)):\n        dataset_batch = dataset_batch[0]\n    assert nest.flatten(dataset_batch)\n\n    def _find_any_tensor(batch_features):\n        tensors = [x for x in nest.flatten(batch_features) if tensor_util.is_tf_type(x)]\n        if not tensors:\n            raise ValueError('Cannot find any Tensor in features dict.')\n        return tensors[0]\n    return backend.cast(backend.shape(_find_any_tensor(dataset_batch))[0], dtype='int64')",
    "docstring": "Returns the number of elements in a potentially partial batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py",
    "ast_data": "FunctionDef name:get_real_batch_size arg:self arg:dataset_batch arguments arg arg If Call Assign Call FunctionDef name:_find_any_tensor arg:batch_features arguments arg Assign Call Call If Raise Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "silence",
    "source_code": "@tf_contextlib.contextmanager\ndef silence():\n    global _PRINT_DEPRECATION_WARNINGS\n    print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS\n    _PRINT_DEPRECATION_WARNINGS = False\n    yield\n    _PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings",
    "docstring": "Temporarily silence deprecation warnings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:silence arguments Assign Assign Assign"
  },
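A hedged sketch of how this context manager would be used; the import path is TensorFlow-internal and assumed from the entry's file_path:

```python
from tensorflow.python.util import deprecation

with deprecation.silence():
    # Calls to deprecated TF APIs inside this block do not log
    # deprecation warnings; the previous flag is restored on exit.
    pass
```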
  {
    "library": "pytorch",
    "name": "_get_win_folder_from_registry",
    "source_code": "def _get_win_folder_from_registry(csidl_name):\n    import winreg as _winreg\n    shell_folder_name = {'CSIDL_APPDATA': 'AppData', 'CSIDL_COMMON_APPDATA': 'Common AppData', 'CSIDL_LOCAL_APPDATA': 'Local AppData'}[csidl_name]\n    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, 'Software\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Explorer\\\\Shell Folders')\n    dir, _type = _winreg.QueryValueEx(key, shell_folder_name)\n    return dir",
    "docstring": "This is a fallback technique at best. I'm not sure if using the registry for this guarantees us the correct answer for all CSIDL_* names.",
    "type": "function",
    "file_path": "pytorch\\torch\\_appdirs.py",
    "ast_data": "FunctionDef name:_get_win_folder_from_registry arg:csidl_name arguments arg Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "kernel_matrix",
    "source_code": "def kernel_matrix(x, kernel_func, out):\n    for i in range(x.shape[0]):\n        for j in range(i + 1):\n            out[i, j] = kernel_func(np.linalg.norm(x[i] - x[j]))\n            out[j, i] = out[i, j]",
    "docstring": "Evaluate RBFs, with centers at , at .",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py",
    "ast_data": "FunctionDef name:kernel_matrix arg:x arg:kernel_func arg:out arguments arg arg arg For Call For Call Assign Call Call Assign"
  },
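To illustrate the in-place symmetric fill, here is a self-contained sketch that re-declares the function body from the entry above; the Gaussian kernel is an assumption chosen for the demo:

```python
import numpy as np

def kernel_matrix(x, kernel_func, out):
    # Fill only the lower triangle, then mirror it: the matrix of
    # pairwise kernel values k(||x_i - x_j||) is symmetric.
    for i in range(x.shape[0]):
        for j in range(i + 1):
            out[i, j] = kernel_func(np.linalg.norm(x[i] - x[j]))
            out[j, i] = out[i, j]

x = np.random.rand(5, 2)                         # five centers in 2-D
out = np.empty((5, 5))
kernel_matrix(x, lambda r: np.exp(-r**2), out)   # Gaussian RBF
assert np.allclose(out, out.T)                   # symmetric by construction
```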
  {
    "library": "pandas",
    "name": "_is_in_terminal",
    "source_code": "def _is_in_terminal(self) -> bool:\n    return bool(self.max_cols == 0 or self.max_rows == 0)",
    "docstring": "Check if the output is to be shown in terminal.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_is_in_terminal arg:self arguments arg Return return:yes Call BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_conv3d_expanded_batch",
    "source_code": "def _conv3d_expanded_batch(input, filter, strides, padding, data_format, dilations=None, name=None):\n    shape = input.shape\n    ndims = getattr(shape, 'ndims', -1)\n    if ndims == -1:\n        ndims = len(shape)\n    if ndims in (5, 4, 3, 2, 1, 0, None):\n        return gen_nn_ops.conv3d(input, filter, strides, padding, data_format=data_format, dilations=dilations, name=name)\n    else:\n        return squeeze_batch_dims(input, functools.partial(gen_nn_ops.conv3d, filter=filter, strides=strides, padding=padding, data_format=data_format, dilations=dilations), inner_rank=4, name=name)",
    "docstring": "Helper function for ; handles expanded batches.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_conv3d_expanded_batch arg:input arg:filter arg:strides arg:padding arg:data_format arg:dilations arg:name arguments arg arg arg arg arg arg arg Assign Assign Call If Compare Assign Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_root_scalar_halley_doc",
    "source_code": "def _root_scalar_halley_doc():\n    pass",
    "docstring": "Options ------- args : tuple, optional Extra arguments passed to the objective function and its derivatives. xtol : float, optional Tolerance (absolute) for termination. rtol : float, optional Tolerance (relative) for termination. maxiter : int, optional Maximum number of iterations. x0 : float, required Initial guess. fprime : bool or callable, required If is a boolean and is True, is assumed to return the value of derivative along with the objective function. can also be a callable returning the derivative of . In this case, it must accept the same arguments as . fprime2 : bool or callable, required If is a boolean and is True, is assumed to return the value of 1st and 2nd derivatives along with the objective function. can also be a callable returning the 2nd derivative of . In this case, it must accept the same arguments as . options: dict, optional Specifies any method-specific options not covered above.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_root_scalar.py",
    "ast_data": "FunctionDef name:_root_scalar_halley_doc arguments"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_participants):\n    self._num_participants = num_participants\n    self._counter = 0\n    self._flag = False\n    self._local_sense = threading.local()\n    self._lock = threading.Lock()\n    self._condition = threading.Condition()",
    "docstring": "Initializes the barrier object. Args: num_participants: an integer which is the expected number of calls of pass to through this barrier.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_participants arguments arg arg Assign Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "generate",
    "source_code": "def generate(self) -> torch.fx.GraphModule:\n    self._generate_graph_inputs()\n    for line in self.lines:\n        if isinstance(line, WrapperLine):\n            line.codegen_fx(self)(line)\n        elif isinstance(line, LineContext):\n            pass\n        else:\n            raise NotImplementedError(textwrap.dedent(f\"\\n                    Found line of unrecognized type '{type(line)}':\\n                        '{line}'\\n\\n                    FX conversion only supports Wrapper IR lines.\\n                    \"))\n    self._generate_output()\n    self.gm.recompile()\n    return self.gm",
    "docstring": "Main entrypoint for FX codegen.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:generate arg:self arguments arg Call For If Call Call Call If Call Raise Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "needs_etag",
    "source_code": "def needs_etag(self, response):\n    cache_control_headers = cc_delim_re.split(response.get('Cache-Control', ''))\n    return all((header.lower() != 'no-store' for header in cache_control_headers))",
    "docstring": "Return True if an ETag header should be added to response.",
    "type": "method",
    "file_path": "django\\django\\middleware\\http.py",
    "ast_data": "FunctionDef name:needs_etag arg:self arg:response arguments arg arg Assign Call Call Return return:yes Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "smallest_normal",
    "source_code": "@property\ndef smallest_normal(self):\n    if isnan(self._machar.smallest_normal.flat[0]):\n        warnings.warn('The value of smallest normal is undefined for double double', UserWarning, stacklevel=2)\n    return self._machar.smallest_normal.flat[0]",
    "docstring": "Return the value for the smallest normal. Returns ------- smallest_normal : float Value for the smallest normal. Warns ----- UserWarning If the calculated value for the smallest normal is requested for double-double.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:smallest_normal arg:self arguments arg If Call Call Return return:yes"
  },
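The property is reached through np.finfo; a short check:

```python
import numpy as np

# For IEEE double precision the smallest normal is 2**-1022.
print(np.finfo(np.float64).smallest_normal)  # 2.2250738585072014e-308
```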
  {
    "library": "pytorch",
    "name": "set_float_to_observed_mapping",
    "source_code": "def set_float_to_observed_mapping(self, float_class: type, observed_class: type, quant_type: QuantType=QuantType.STATIC) -> PrepareCustomConfig:\n    if quant_type != QuantType.STATIC:\n        raise ValueError('set_float_to_observed_mapping is currently only supported for static quantization')\n    if quant_type not in self.float_to_observed_mapping:\n        self.float_to_observed_mapping[quant_type] = {}\n    self.float_to_observed_mapping[quant_type][float_class] = observed_class\n    return self",
    "docstring": "Set the mapping from a custom float module class to a custom observed module class. The observed module class must have a `` class method that converts the float module class to the observed module class. This is currently only supported for static quantization.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_float_to_observed_mapping arg:self arg:float_class arg:observed_class arg:quant_type arguments arg arg arg arg If Compare Raise Call If Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "inline_user_function_return",
    "source_code": "def inline_user_function_return(self, fn, args, kwargs):\n    if config.enable_faithful_generator_behavior and is_generator(fn.get_code()):\n        return self.inline_generator_function(fn, args, kwargs)\n    else:\n        return InliningInstructionTranslator.inline_call(self, fn, args, kwargs)",
    "docstring": "A call to some user defined function by inlining it.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py",
    "ast_data": "FunctionDef name:inline_user_function_return arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fetch_lfw_people",
    "source_code": "def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0):\n    person_names, file_paths = ([], [])\n    for person_name in sorted(listdir(data_folder_path)):\n        folder_path = join(data_folder_path, person_name)\n        if not isdir(folder_path):\n            continue\n        paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]\n        n_pictures = len(paths)\n        if n_pictures >= min_faces_per_person:\n            person_name = person_name.replace('_', ' ')\n            person_names.extend([person_name] * n_pictures)\n            file_paths.extend(paths)\n    n_faces = len(file_paths)\n    if n_faces == 0:\n        raise ValueError('min_faces_per_person=%d is too restrictive' % min_faces_per_person)\n    target_names = np.unique(person_names)\n    target = np.searchsorted(target_names, person_names)\n    faces = _load_imgs(file_paths, slice_, color, resize)\n    indices = np.arange(n_faces)\n    np.random.RandomState(42).shuffle(indices)\n    faces, target = (faces[indices], target[indices])\n    return (faces, target, target_names)",
    "docstring": "Perform the actual data loading for the lfw people dataset This operation is meant to be cached by a joblib wrapper.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_lfw.py",
    "ast_data": "FunctionDef name:_fetch_lfw_people arg:data_folder_path arg:slice_ arg:color arg:resize arg:min_faces_per_person arguments arg arg arg arg arg Assign For Call Call Assign Call If Call Assign Call Call Call Assign Call If Compare Assign Call Call Call Assign Call If Compare Raise Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "name",
    "source_code": "@property\n@abstractmethod\ndef name(self) -> str:\n    pass",
    "docstring": "Get the name of the backend.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self, *, is_discrete=NotImplemented, event_dim=NotImplemented):\n    if is_discrete is NotImplemented:\n        is_discrete = self._is_discrete\n    if event_dim is NotImplemented:\n        event_dim = self._event_dim\n    return _Dependent(is_discrete=is_discrete, event_dim=event_dim)",
    "docstring": "Support for syntax to customize static attributes:: constraints.dependent(is_discrete=True, event_dim=1)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg If Compare Assign If Compare Assign Return return:yes Call"
  },
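Following the syntax shown in the docstring, a short sketch (assuming a recent torch where constraints.dependent is a module-level _Dependent instance):

```python
from torch.distributions import constraints

# Calling the instance returns a copy with the requested
# static attributes overridden.
c = constraints.dependent(is_discrete=True, event_dim=1)
print(c.is_discrete, c.event_dim)  # True 1
```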
  {
    "library": "scipy",
    "name": "_rand2",
    "source_code": "def _rand2(self, samples):\n    r0, r1, r2, r3, r4 = samples[..., :5].T\n    bprime = self.population[r0] + self.scale * (self.population[r1] + self.population[r2] - self.population[r3] - self.population[r4])\n    return bprime",
    "docstring": "rand2bin, rand2exp",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_rand2 arg:self arg:samples arguments arg arg Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return f'S({self.dim})'",
    "docstring": "human readable representation of the Shard placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_different_indices",
    "source_code": "@staticmethod\ndef _get_different_indices(prev: list[list[bool]], curr: list[list[bool]]) -> list[PathOutputIndex]:\n    dead_indices = []\n    assert len(prev) <= len(curr)\n    for i, (outputs1, outputs2) in enumerate(zip(prev, curr)):\n        assert len(outputs1) == len(outputs2)\n        for j, (output1, output2) in enumerate(zip(outputs1, outputs2)):\n            if output1 != output2:\n                dead_indices.append((i, j))\n    return dead_indices",
    "docstring": "Find indices where the two lists differ.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_get_different_indices arg:prev arg:curr arguments arg arg Assign Compare Call Call For Call Call Compare Call Call For Call Call If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "nonzero",
    "source_code": "@_onnx_symbolic('aten::nonzero')\n@symbolic_helper.parse_args('v')\ndef nonzero(g: jit_utils.GraphContext, input):\n    return t(g, g.op('NonZero', input))",
    "docstring": "Emitted from",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:nonzero arg:g arg:input arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, X, y=None):\n    return self.fit(X, y).transform(X)",
    "docstring": "Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_create_like_index_sql",
    "source_code": "def _create_like_index_sql(self, model, field):\n    db_type = field.db_type(connection=self.connection)\n    if db_type is not None and (field.db_index or field.unique):\n        if '[' in db_type:\n            return None\n        collation_name = getattr(field, 'db_collation', None)\n        if not collation_name and field.is_relation:\n            collation_name = getattr(field.target_field, 'db_collation', None)\n        if collation_name and (not self._is_collation_deterministic(collation_name)):\n            return None\n        if db_type.startswith('varchar'):\n            return self._create_index_sql(model, fields=[field], suffix='_like', opclasses=['varchar_pattern_ops'])\n        elif db_type.startswith('text'):\n            return self._create_index_sql(model, fields=[field], suffix='_like', opclasses=['text_pattern_ops'])\n    return None",
    "docstring": "Return the statement to create an index with varchar operator pattern when the column type is 'varchar' or 'text', otherwise return None.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\schema.py",
    "ast_data": "FunctionDef name:_create_like_index_sql arg:self arg:model arg:field arguments arg arg arg Assign Call If BoolOp Compare BoolOp If Compare Return return:no Assign Call If BoolOp Assign Call If BoolOp Call Return return:no If Call Return return:yes Call If Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "pprint_getters",
    "source_code": "def pprint_getters(self):\n    lines = []\n    for name, val in sorted(self.properties().items()):\n        if getattr(val, 'shape', ()) != () and len(val) > 6:\n            s = str(val[:6]) + '...'\n        else:\n            s = str(val)\n        s = s.replace('\\n', ' ')\n        if len(s) > 50:\n            s = s[:50] + '...'\n        name = self.aliased_name(name)\n        lines.append(f'    {name} = {s}')\n    return lines",
    "docstring": "Return the getters and actual values as list of strings.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:pprint_getters arg:self arguments arg Assign For Call Call Call If BoolOp Compare Call Compare Call Assign Call Assign Call Assign Call If Compare Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "run_relax",
    "source_code": "def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):\n    raise NotImplementedError(f'{self.__class__.__name__} does not support run_relax() method')",
    "docstring": "Integrate from t=t0 to t>=t1 and return (y1,t).",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_ode.py",
    "ast_data": "FunctionDef name:run_relax arg:self arg:f arg:jac arg:y0 arg:t0 arg:t1 arg:f_params arg:jac_params arguments arg arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "EnforceUnique",
    "source_code": "class EnforceUnique:\n\n    def __init__(self):\n        self.seen = set()\n\n    def see(self, *key):\n        if key in self.seen:\n            raise RuntimeError('duplicate key: ' + str(key))\n        self.seen.add(key)",
    "docstring": "Raises an error if a key is seen more than once.",
    "type": "class",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "ClassDef name:EnforceUnique FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:see arg:self arguments arg arg If Compare Raise Call Call Call"
  },
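A usage sketch, assuming the class is importable from the entry's module (torch.autograd.profiler):

```python
from torch.autograd.profiler import EnforceUnique

guard = EnforceUnique()
guard.see("conv1", 0)
guard.see("conv1", 1)      # a different key is fine
try:
    guard.see("conv1", 0)  # repeating a key raises
except RuntimeError as e:
    print(e)               # duplicate key: ('conv1', 0)
```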
  {
    "library": "pytorch",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    state = self.__dict__\n    if IterDataPipe.getstate_hook is not None:\n        return IterDataPipe.getstate_hook(state)\n    return state",
    "docstring": "Serialize functions when is available. If this doesn't cover your custom DataPipe's use case, consider writing custom methods for and , or use for serialization.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\datapipe.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_reset_configs",
    "source_code": "def _reset_configs(self) -> None:\n    for field_name, field_obj in self.fields.items():\n        self._set_config(field_name, field_obj.default)",
    "docstring": "Reset all configs to their default values.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "FunctionDef name:_reset_configs arg:self arguments arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_SegmentMeanGrad",
    "source_code": "@ops.RegisterGradient('SegmentMean')\ndef _SegmentMeanGrad(op: ops.Operation, grad):\n    data_rank = array_ops.rank(op.inputs[0])\n    segment_ids_shape = array_ops.shape(op.inputs[1])\n    remaining_shape = array_ops.ones(array_ops.expand_dims(data_rank - 1, 0), dtype=segment_ids_shape.dtype)\n    ones_shape = array_ops.concat([segment_ids_shape, remaining_shape], 0)\n    ones = array_ops.ones(ones_shape, dtype=grad.dtype)\n    scaled_grad = math_ops.divide(grad, math_ops.segment_sum(ones, op.inputs[1]))\n    return (array_ops.gather(scaled_grad, op.inputs[1]), None)",
    "docstring": "Gradient for SegmentMean.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_SegmentMeanGrad arg:op arg:grad arguments arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_FractionalMaxPoolGrad",
    "source_code": "@ops.RegisterGradient('FractionalMaxPool')\ndef _FractionalMaxPoolGrad(op: ops.Operation, grad_0, unused_grad_1, unused_grad_2):\n    return gen_nn_ops.fractional_max_pool_grad(op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2], op.get_attr('overlapping'))",
    "docstring": "Returns gradient for FractionalMaxPool. Since FractionalMaxPool has three outputs, there are three gradients passed in for each of the outputs. Only the first one is useful, the other two gradients are empty. Args: op: The FractionalMaxPoolOp. grad_0: Gradient with respect to op.outputs[0] unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty. unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty. Returns: Input backprop for FractionalMaxPool op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_grad.py",
    "ast_data": "FunctionDef name:_FractionalMaxPoolGrad arg:op arg:grad_0 arg:unused_grad_1 arg:unused_grad_2 arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "_get_raw_insecure_uri",
    "source_code": "def _get_raw_insecure_uri(self):\n    return '{scheme}://{host}{path}'.format(scheme=self.request.scheme, host=self.request._get_raw_host(), path=self.request.get_full_path())",
    "docstring": "Return an absolute URI from variables available in this request. Skip allowed hosts protection, so may return insecure URI.",
    "type": "method",
    "file_path": "django\\django\\views\\debug.py",
    "ast_data": "FunctionDef name:_get_raw_insecure_uri arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "extract_image_patches",
    "source_code": "@tf_export(v1=['image.extract_image_patches', 'extract_image_patches'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'ksizes is deprecated, use sizes instead', 'ksizes')\ndef extract_image_patches(images, ksizes=None, strides=None, rates=None, padding=None, name=None, sizes=None):\n    ksizes = deprecation.deprecated_argument_lookup('sizes', sizes, 'ksizes', ksizes)\n    return gen_array_ops.extract_image_patches(images, ksizes, strides, rates, padding, name)",
    "docstring": "Extract patches from images and put them in the \"depth\" output dimension. Args: : A . Must be one of the following types: , , , , , , , , , , , . 4-D Tensor with shape . : A list of that has length . The size of the sliding window for each dimension of . : A list of that has length . 1-D of length 4. How far the centers of two consecutive patches are in the images. Must be: . : A list of that has length . 1-D of length 4. Must be: . This is the input stride, specifying how far two consecutive patch samples are in the input. Equivalent to extracting patches with , followed by subsampling them spatially by a factor of . This is equivalent to in dilated (a.k.a. Atrous) convolutions. : A from: \"SAME\", \"VALID\". The type of padding algorithm to use. We specify the size-related attributes as: Returns: A Tensor. Has the same type as images.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:extract_image_patches arg:images arg:ksizes arg:strides arg:rates arg:padding arg:name arg:sizes arguments arg arg arg arg arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "saving",
    "source_code": "@contextlib.contextmanager\ndef saving(self, fig, outfile, dpi, *args, **kwargs):\n    if mpl.rcParams['savefig.bbox'] == 'tight':\n        _log.info(\"Disabling savefig.bbox = 'tight', as it may cause frame size to vary, which is inappropriate for animation.\")\n    self.setup(fig, outfile, dpi, *args, **kwargs)\n    with mpl.rc_context({'savefig.bbox': None}):\n        try:\n            yield self\n        finally:\n            self.finish()",
    "docstring": "Context manager to facilitate writing the movie file. `setup`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:saving arg:self arg:fig arg:outfile arg:dpi arguments arg arg arg arg arg arg If Compare Call Call With Call Try Call"
  },
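This matches the documented MovieWriter workflow; a hedged sketch assuming an FFMpegWriter backend with ffmpeg available on PATH:

```python
import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter

fig, ax = plt.subplots()
writer = FFMpegWriter(fps=15)
# saving() calls setup(), yields the writer, and calls finish() on exit.
with writer.saving(fig, "movie.mp4", dpi=100):
    for i in range(10):
        ax.plot([0, i], [0, i])
        writer.grab_frame()  # capture the current figure state as one frame
```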
  {
    "library": "numpy",
    "name": "_decode_line",
    "source_code": "def _decode_line(line, encoding=None):\n    if type(line) is bytes:\n        if encoding is None:\n            encoding = 'latin1'\n        line = line.decode(encoding)\n    return line",
    "docstring": "Decode bytes from binary input streams. Defaults to decoding from 'latin1'. Parameters ---------- line : str or bytes Line to be decoded. encoding : str Encoding used to decode . Returns ------- decoded_line : str",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:_decode_line arg:line arg:encoding arguments arg arg If Compare Call If Compare Assign Assign Call Return return:yes"
  },
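A quick demo, assuming the private module path from the entry's file_path is importable:

```python
from numpy.lib._iotools import _decode_line

print(_decode_line(b"caf\xe9"))                        # 'café' (latin1 default)
print(_decode_line(b"caf\xc3\xa9", encoding="utf-8"))  # 'café'
print(_decode_line("already str"))                     # non-bytes returned unchanged
```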
  {
    "library": "tensorflow",
    "name": "delete",
    "source_code": "def delete(self, dims: List[int]) -> 'Layout':\n    if not isinstance(dims, list):\n        dims = [dims]\n    new_specs = [spec for i, spec in enumerate(self.sharding_specs) if i not in dims]\n    return Layout(new_specs, self.mesh)",
    "docstring": "Returns the layout with the give dimensions deleted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:delete arg:self arg:dims arguments arg arg If Call Assign Assign Call Compare Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "finish",
    "source_code": "def finish(self) -> None:\n    pass",
    "docstring": "Finish the building process. The default implementation does nothing.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:finish arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "check_branch",
    "source_code": "def check_branch(subcommand: str, branch: str | None) -> str | None:\n    if subcommand != 'checkout':\n        return None\n    if branch is None:\n        return \"Branch name to checkout must be supplied with '-b' option\"\n    cmd = git('status', '--untracked-files=no', '--porcelain')\n    stdout = subprocess.check_output(cmd, text=True, encoding='utf-8')\n    if stdout.strip():\n        return 'Need to have clean working tree to checkout!\\n\\n' + stdout\n    cmd = git('show-ref', '--verify', '--quiet', f'refs/heads/{branch}')\n    p = subprocess.run(cmd, capture_output=True, check=False)\n    if not p.returncode:\n        return f'Branch {branch!r} already exists'\n    return None",
    "docstring": "Checks that the branch name can be checked out.",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:check_branch arg:subcommand arg:branch arguments arg arg If Compare Return return:no If Compare Return return:yes Assign Call Assign Call If Call Return return:yes Assign Call Assign Call If Return return:yes Return return:no"
  },
  {
    "library": "numpy",
    "name": "integ",
    "source_code": "def integ(self, m=1, k=0):\n    return poly1d(polyint(self.coeffs, m=m, k=k))",
    "docstring": "Return an antiderivative (indefinite integral) of this polynomial. Refer to for full documentation. See Also -------- polyint : equivalent function",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py",
    "ast_data": "FunctionDef name:integ arg:self arg:m arg:k arguments arg arg arg Return return:yes Call Call"
  },
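A worked example of the method on a concrete polynomial:

```python
import numpy as np

p = np.poly1d([6, 0])      # p(x) = 6x
P = p.integ(m=1, k=3)      # integrate once, integration constant k=3
print(P.coeffs)            # [3. 0. 3.]  ->  3x**2 + 3
print(np.allclose(P.deriv().coeffs, p.coeffs))  # True: derivative recovers p
```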
  {
    "library": "scikit-learn",
    "name": "_ortho_rotation",
    "source_code": "def _ortho_rotation(components, method='varimax', tol=1e-06, max_iter=100):\n    nrow, ncol = components.shape\n    rotation_matrix = np.eye(ncol)\n    var = 0\n    for _ in range(max_iter):\n        comp_rot = np.dot(components, rotation_matrix)\n        if method == 'varimax':\n            tmp = comp_rot * np.transpose((comp_rot ** 2).sum(axis=0) / nrow)\n        elif method == 'quartimax':\n            tmp = 0\n        u, s, v = np.linalg.svd(np.dot(components.T, comp_rot ** 3 - tmp))\n        rotation_matrix = np.dot(u, v)\n        var_new = np.sum(s)\n        if var != 0 and var_new < var * (1 + tol):\n            break\n        var = var_new\n    return np.dot(components, rotation_matrix).T",
    "docstring": "Return rotated components.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py",
    "ast_data": "FunctionDef name:_ortho_rotation arg:components arg:method arg:tol arg:max_iter arguments arg arg arg arg Assign Assign Call Assign For Call Assign Call If Compare Assign Call Call If Compare Assign Assign Call Call Assign Call Assign Call If BoolOp Compare Compare Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "ToolForward",
    "source_code": "class ToolForward(ViewsPositionsBase):\n    description = 'Forward to next view'\n    image = 'mpl-data/images/forward'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.forward'])\n    _on_trigger = 'forward'",
    "docstring": "Move forward in the view lim stack.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolForward Assign Assign Assign Call arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "ScopeContextManager",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass ScopeContextManager:\n\n    def __init__(self, scope: Scope, current_scope: Scope):\n        super().__init__()\n        self._prev_scope = copy.copy(scope)\n        scope.module_path = current_scope.module_path\n        scope.module_type = current_scope.module_type\n        self._scope = scope\n\n    def __enter__(self):\n        return self._scope\n\n    def __exit__(self, *args):\n        self._scope.module_path = self._prev_scope.module_path\n        self._scope.module_type = self._prev_scope.module_type\n        return",
    "docstring": "A context manager to track the Scope of Node during symbolic tracing. When entering a forward function of a Module, we'll update the scope information of the current module, and when we exit, we'll restore the previous scope information.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "ClassDef name:ScopeContextManager FunctionDef name:__init__ arg:self arg:scope arg:current_scope arguments arg arg arg Call Call Assign Call Assign Assign Assign FunctionDef name:__enter__ arg:self arguments arg Return return:yes FunctionDef name:__exit__ arg:self arguments arg arg Assign Assign Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "mesh",
    "source_code": "@property\ndef mesh(self):\n    return self._mesh",
    "docstring": "Returns the mesh used by the strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:mesh arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "path_of_module",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef path_of_module(self, mod: torch.nn.Module) -> str:\n    if self.submodule_paths:\n        path = self.submodule_paths.get(mod)\n        if path is None:\n            raise NameError('module is not installed as a submodule')\n        assert isinstance(path, str)\n        return path\n    else:\n        for n, p in self.root.named_modules():\n            if mod is p:\n                return n\n        raise NameError('module is not installed as a submodule')",
    "docstring": "Helper method to find the qualified name of `` to retrieve the qualified name for.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:path_of_module arg:self arg:mod arguments arg arg If Assign Call If Compare Raise Call Call Return return:yes For Call If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "cherrypy",
    "name": "subscribe",
    "source_code": "def subscribe(self):\n    self.bus.subscribe('start', self.start)\n    self.bus.subscribe('stop', self.stop)",
    "docstring": "Subscribe control methods to the bus lifecycle events.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:subscribe arg:self arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_matrix_vector_product_of_stacks",
    "source_code": "def _matrix_vector_product_of_stacks(A, b):\n    return np.einsum('ijk,ik->ij', A, b)",
    "docstring": "Compute the product of stack of matrices and vectors.",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_matrix_vector_product_of_stacks arg:A arg:b arguments arg arg Return return:yes Call"
  },
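The einsum spec 'ijk,ik->ij' contracts each matrix in the stack with its matching vector; a self-contained check against the per-slice loop:

```python
import numpy as np

A = np.random.rand(10, 3, 3)   # stack of ten 3x3 matrices
b = np.random.rand(10, 3)      # stack of ten 3-vectors
out = np.einsum('ijk,ik->ij', A, b)

# Equivalent per-slice loop, for reference.
ref = np.stack([A[i] @ b[i] for i in range(10)])
assert np.allclose(out, ref)
```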
  {
    "library": "pandas",
    "name": "read_metadata",
    "source_code": "def read_metadata(self, key: str):\n    if getattr(getattr(self.group, 'meta', None), key, None) is not None:\n        return self.parent.select(self._get_metadata_path(key))\n    return None",
    "docstring": "return the meta data array for this key",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:read_metadata arg:self arg:key arguments arg arg If Compare Call Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_tensor_or_ragged_tensor",
    "source_code": "def convert_to_tensor_or_ragged_tensor(value, dtype=None, preferred_dtype=None, name=None):\n    if isinstance(value, RaggedTensor):\n        if dtype and (not dtype.is_compatible_with(value.dtype)):\n            raise ValueError(f'Tensor conversion requested dtype {dtype.name} for RaggedTensor with dtype {value.dtype.name}: {value}.')\n        return value\n    elif isinstance(value, ragged_tensor_value.RaggedTensorValue):\n        with ops.name_scope(name, 'ConvertToTensorOrRaggedTensor', []):\n            flat_values = ops.convert_to_tensor(value=value.flat_values, dtype=dtype, dtype_hint=preferred_dtype, name='flat_values')\n            return RaggedTensor.from_nested_row_splits(flat_values, value.nested_row_splits, validate=False)\n    else:\n        return tensor_conversion.convert_to_tensor_v2_with_dispatch(value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)",
    "docstring": "Converts value to a or . * If is a , then return it as-is. * If is a , return a corresponding constant . * Otherwise, use to convert to a . Args: value: A , a , or an object whose type has a registered conversion function. dtype: Optional element type for the returned tensor. If missing the type is inferred from the type of . preferred_dtype: Optional element type for the returned tensor, used when dtype is None. This argument has no effect if is already a tensor, or when conversion is not possible. name: Optional name to use if a new is created. Returns: A or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:convert_to_tensor_or_ragged_tensor arg:value arg:dtype arg:preferred_dtype arg:name arguments arg arg arg arg If Call If BoolOp Call Raise Call Return return:yes If Call With Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_uniform",
    "source_code": "def is_uniform(self):\n    return self._uniform_row_length is not None",
    "docstring": "Returns true if the partition is known to be uniform statically. This is based upon the existence of self._uniform_row_length. For example: RowPartition.from_row_lengths([3,3,3]).is_uniform()==false RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true RowPartition.from_row_lengths([2,0,2]).is_uniform()==false Returns: Whether a RowPartition is known to be uniform statically.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:is_uniform arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    source_tensor = transformation_cache.get(self.source_column, state_manager)\n    return math_ops._bucketize(source_tensor, boundaries=self.boundaries)",
    "docstring": "Returns bucketized categorical tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_transformers",
    "source_code": "def _validate_transformers(self):\n    if not self.transformers:\n        return\n    names, transformers, _ = zip(*self.transformers)\n    self._validate_names(names)\n    for t in transformers:\n        if t in ('drop', 'passthrough'):\n            continue\n        if not (hasattr(t, 'fit') or hasattr(t, 'fit_transform')) or not hasattr(t, 'transform'):\n            raise TypeError(\"All estimators should implement fit and transform, or can be 'drop' or 'passthrough' specifiers. '%s' (type %s) doesn't.\" % (t, type(t)))",
    "docstring": "Validate names of transformers and the transformers themselves. This checks whether given transformers have the required methods, i.e. or and implemented.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:_validate_transformers arg:self arguments arg If Return return:no Assign Call Call For If Compare If BoolOp BoolOp Call Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_cluster_spec_to_device_list",
    "source_code": "def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):\n    cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)\n    devices = []\n    for task_type in ('chief', 'worker'):\n        for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):\n            if num_gpus_per_worker == 0:\n                devices.append('/job:%s/task:%d/device:CPU:0' % (task_type, task_id))\n            else:\n                devices.extend(['/job:%s/task:%d/device:GPU:%i' % (task_type, task_id, gpu_id) for gpu_id in range(num_gpus_per_worker)])\n    return devices",
    "docstring": "Returns a device list given a cluster spec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_cluster_spec_to_device_list arg:cluster_spec arg:num_gpus_per_worker arguments arg arg Assign Call Assign For For Call Call Call Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "yuv422_to_rgb",
    "source_code": "def yuv422_to_rgb(imagey: Tensor, imageuv: Tensor) -> Tensor:\n    if not isinstance(imagey, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(imagey)}')\n    if not isinstance(imageuv, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(imageuv)}')\n    if len(imagey.shape) < 3 or imagey.shape[-3] != 1:\n        raise ValueError(f'Input imagey size must have a shape of (*, 1, H, W). Got {imagey.shape}')\n    if len(imageuv.shape) < 3 or imageuv.shape[-3] != 2:\n        raise ValueError(f'Input imageuv size must have a shape of (*, 2, H, W/2). Got {imageuv.shape}')\n    if len(imagey.shape) < 2 or imagey.shape[-2] % 2 == 1 or imagey.shape[-1] % 2 == 1:\n        raise ValueError(f'Input H&W must be evenly disible by 2. Got {imagey.shape}')\n    if len(imageuv.shape) < 2 or len(imagey.shape) < 2 or imagey.shape[-1] / imageuv.shape[-1] != 2:\n        raise ValueError(f'Input imageuv W must be half the size of the luma plane. Got {imagey.shape} and {imageuv.shape}')\n    yuv444image = torch.cat([imagey, imageuv.repeat_interleave(2, dim=-1)], dim=-3)\n    return yuv_to_rgb(yuv444image)",
    "docstring": "Convert an YUV422 image to RGB. Input need to be padded to be evenly divisible by 2 vertical. The image data is assumed to be in the range of :math: for luma (Y). The ranges of U and V are :math: and :math:, respectively. YUV formula follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: imagey: Y (luma) Image plane to be converted to RGB with shape :math:. imageuv: UV (luma) Image planes to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> inputy = torch.rand(2, 1, 4, 6) >>> inputuv = torch.rand(2, 2, 2, 3) >>> output = yuv420_to_rgb(inputy, inputuv) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "FunctionDef name:yuv422_to_rgb arg:imagey arg:imageuv arguments arg arg If Call Raise Call Call If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Raise Call If BoolOp Compare Call Compare Compare Raise Call If BoolOp Compare Call Compare Call Compare Raise Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_sanitize_and_check",
    "source_code": "def _sanitize_and_check(indexes):\n    kinds = {type(index) for index in indexes}\n    if list in kinds:\n        if len(kinds) > 1:\n            indexes = [Index(list(x)) if not isinstance(x, Index) else x for x in indexes]\n            kinds -= {list}\n        else:\n            return (indexes, 'list')\n    if len(kinds) > 1 or Index not in kinds:\n        return (indexes, 'special')\n    else:\n        return (indexes, 'array')",
    "docstring": "Verify the type of indexes and convert lists to Index. Cases: - [list, list, ...]: Return ([list, list, ...], 'list') - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...]) Lists are sorted and converted to Index. - [Index, Index, ...]: Return ([Index, Index, ...], TYPE) TYPE = 'special' if at least one special type, 'array' otherwise. Parameters ---------- indexes : list of Index or list objects Returns ------- sanitized_indexes : list of Index or list objects type : {'list', 'array', 'special'}",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\api.py",
    "ast_data": "FunctionDef name:_sanitize_and_check arg:indexes arguments arg Assign Call If Compare If Compare Call Assign Call Call Call Return return:yes If BoolOp Compare Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_calculate_quad_point_coordinates",
    "source_code": "def _calculate_quad_point_coordinates(x, y, width, height, angle=0):\n    angle = math.radians(-angle)\n    sin_angle = math.sin(angle)\n    cos_angle = math.cos(angle)\n    a = x + height * sin_angle\n    b = y + height * cos_angle\n    c = x + width * cos_angle + height * sin_angle\n    d = y - width * sin_angle + height * cos_angle\n    e = x + width * cos_angle\n    f = y - width * sin_angle\n    return ((x, y), (e, f), (c, d), (a, b))",
    "docstring": "Calculate the coordinates of rectangle when rotated by angle around x, y",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_calculate_quad_point_coordinates arg:x arg:y arg:width arg:height arg:angle arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Assign Assign Assign Assign Assign Return return:yes"
  },
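A quick sanity check of the geometry; this re-declares the entry's helper locally so the snippet runs without matplotlib:

```python
import math

def quad_points(x, y, width, height, angle=0):
    # Local copy of the entry's helper, for demonstration only.
    angle = math.radians(-angle)
    sin_angle = math.sin(angle)
    cos_angle = math.cos(angle)
    a = x + height * sin_angle
    b = y + height * cos_angle
    c = x + width * cos_angle + height * sin_angle
    d = y - width * sin_angle + height * cos_angle
    e = x + width * cos_angle
    f = y - width * sin_angle
    return ((x, y), (e, f), (c, d), (a, b))

# With angle=0 the quad is the axis-aligned rectangle anchored at (x, y).
print(quad_points(1, 1, width=2, height=1))
# ((1, 1), (3.0, 1.0), (3.0, 2.0), (1.0, 2.0))
```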
  {
    "library": "sphinx",
    "name": "_deprecation_warning",
    "source_code": "def _deprecation_warning(module: str, attribute: str, canonical_name: str='', *, remove: tuple[int, int], raises: bool=False) -> None:\n    if remove == (9, 0):\n        warning_class: type[Warning] = RemovedInSphinx90Warning\n    elif remove == (10, 0):\n        warning_class = RemovedInSphinx10Warning\n    else:\n        msg = f'removal version {remove!r} is invalid!'\n        raise RuntimeError(msg)\n    qualname = f'{module}.{attribute}'\n    if canonical_name:\n        message = f'The alias {qualname!r} is deprecated, use {canonical_name!r} instead.'\n    else:\n        message = f'{qualname!r} is deprecated.'\n    if raises:\n        raise AttributeError(message)\n    message = f'{message} Check CHANGES for Sphinx API modifications.'\n    warnings.warn(message, warning_class, stacklevel=3)",
    "docstring": "Helper function for module-level deprecations using `AttributeError` is raised instead of emitting a warning so that it is easy to locate deprecated objects in tests that could suppress deprecation warnings. Usage:: # deprecated name -> (object to return, canonical path or empty string, removal version) _DEPRECATED_OBJECTS = { 'deprecated_name': ( object_to_return, 'fully_qualified_replacement_name', (9, 0), ), } def __getattr__(name: str) -> Any: if name not in _DEPRECATED_OBJECTS: msg = f'module {__name__!r} has no attribute {name!r}' raise AttributeError(msg) from sphinx.deprecation import _deprecation_warning deprecated_object, canonical_name, remove = _DEPRECATED_OBJECTS[name] _deprecation_warning(__name__, name, canonical_name, remove=remove) return deprecated_object",
    "type": "function",
    "file_path": "sphinx\\sphinx\\deprecation.py",
    "ast_data": "FunctionDef name:_deprecation_warning arg:module arg:attribute arg:canonical_name arguments arg arg arg arg arg If Compare If Compare Assign Assign Raise Call Assign If Assign Assign If Raise Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_proj_transform_clip",
    "source_code": "def _proj_transform_clip(xs, ys, zs, M, focal_length):\n    vec = _vec_pad_ones(xs, ys, zs)\n    return _proj_transform_vec_clip(vec, M, focal_length)",
    "docstring": "Transform the points by the projection matrix and return the clipping result returns txs, tys, tzs, tis",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:_proj_transform_clip arg:xs arg:ys arg:zs arg:M arg:focal_length arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fake_signature",
    "source_code": "def fake_signature(fn: Callable[_P, R], nargs: int) -> Callable[_P, R]:\n    argnames = ','.join((f'arg{i}' for i in range(nargs)))\n    return eval(f'lambda {argnames}: fn({argnames})', {'fn': fn})",
    "docstring": "FX gets confused by varargs, de-confuse it",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:fake_signature arg:fn arg:nargs arguments arg arg Assign Call Call Return return:yes Call"
  },
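To show why the helper exists, this sketch re-declares its body locally and inspects the resulting fixed-arity wrapper around a varargs function:

```python
import inspect

def fake_signature(fn, nargs):
    # Local copy of the entry's helper: wrap fn in a lambda whose
    # signature exposes exactly `nargs` positional parameters.
    argnames = ','.join(f'arg{i}' for i in range(nargs))
    return eval(f'lambda {argnames}: fn({argnames})', {'fn': fn})

def variadic(*args):
    return sum(args)

f3 = fake_signature(variadic, 3)
print(inspect.signature(f3))   # (arg0, arg1, arg2)
print(f3(1, 2, 3))             # 6
```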
  {
    "library": "pytorch",
    "name": "new_group",
    "source_code": "@_time_logger\ndef new_group(ranks=None, timeout=None, backend=None, pg_options=None, use_local_synchronization=False, group_desc=None, device_id: Optional[torch.device]=None):\n    return _new_group_with_tag(ranks, timeout, backend, pg_options, None, use_local_synchronization=use_local_synchronization, group_desc=group_desc, device_id=device_id)",
    "docstring": "Create a new distributed group. This function requires that all processes in the main group (i.e. all processes that are part of the distributed job) enter this function, even if they are not going to be members of the group. Additionally, groups should be created in the same order in all processes. .. warning:: Safe concurrent usage: When using multiple process groups with the `work.wait()Using multiple NCCL communicators concurrently init_process_groupBackendnew_group`. N.B. use_local_synchronization doesn't work with MPI. N.B. While use_local_synchronization=True can be significantly faster with larger clusters and small process groups, care must be taken since it changes cluster behavior as non-member ranks don't join the group barrier(). N.B. use_local_synchronization=True can lead to deadlocks when each rank creates multiple overlaping process groups. To avoid that, make sure all ranks follow the same global creation order.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:new_group arg:ranks arg:timeout arg:backend arg:pg_options arg:use_local_synchronization arg:group_desc arg:device_id arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_destroy_resource",
    "source_code": "def _destroy_resource(self):\n    return self._coordinator_instance._destroy_resource()",
    "docstring": "A function that destroys the resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_destroy_resource arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_args",
    "source_code": "def _parse_args():\n    parser = argparse.ArgumentParser(description='preprocess_coco_minival: Preprocess COCO minival dataset')\n    parser.add_argument('--images_folder', type=str, help='Full path of the validation images folder.', required=True)\n    parser.add_argument('--instances_file', type=str, help='Full path of the input JSON file, like instances_val20xx.json.', required=True)\n    parser.add_argument('--allowlist_file', type=str, help='File with COCO image ids to preprocess, one on each line.', required=False)\n    parser.add_argument('--num_images', type=int, help='Number of allowlisted images to preprocess into the output folder.', required=False)\n    parser.add_argument('--output_folder', type=str, help='Full path to output images & text proto files into.', required=True)\n    return parser.parse_known_args(args=sys.argv[1:])[0]",
    "docstring": "Creates a parser that parse the command line arguments. Returns: A namespace parsed from command line arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\evaluation\\tasks\\coco_object_detection\\preprocess_coco_minival.py",
    "ast_data": "FunctionDef name:_parse_args arguments Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_matmat",
    "source_code": "def _matmat(self, X):\n    return np.hstack([self.matvec(col.reshape(-1, 1)) for col in X.T])",
    "docstring": "Default matrix-matrix multiplication handler. Falls back on the user-defined _matvec method, so defining that will define matrix multiplication (though in a very suboptimal way).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py",
    "ast_data": "FunctionDef name:_matmat arg:self arg:X arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_fill_non_empty_info",
    "source_code": "def _fill_non_empty_info(self) -> None:\n    self.add_object_type_line()\n    self.add_index_range_line()\n    self.add_dtypes_line()\n    if self.display_memory_usage:\n        self.add_memory_usage_line()",
    "docstring": "Add lines to the info table, pertaining to non-empty series.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_fill_non_empty_info arg:self arguments arg Call Call Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "set",
    "source_code": "def set(self, object_id, obj):\n    if object_id is None:\n        return\n    self._obj_ids_to_obj[object_id] = obj",
    "docstring": "Stores an instantiated object for future lookup and sharing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:set arg:self arg:object_id arg:obj arguments arg arg arg If Compare Return return:no Assign"
  },
  {
    "library": "tensorflow",
    "name": "WorkerConfig",
    "source_code": "@tf_export('data.experimental.service.WorkerConfig')\nclass WorkerConfig(collections.namedtuple('WorkerConfig', ['dispatcher_address', 'worker_address', 'port', 'protocol', 'heartbeat_interval_ms', 'dispatcher_timeout_ms', 'data_transfer_protocol', 'data_transfer_address'])):\n\n    def __new__(cls, dispatcher_address, worker_address=None, port=0, protocol=None, heartbeat_interval_ms=None, dispatcher_timeout_ms=None, data_transfer_protocol=None, data_transfer_address=None):\n        if worker_address is None:\n            worker_address = 'localhost:%port%'\n        if protocol is None:\n            protocol = _pywrap_utils_exp.TF_DATA_DefaultProtocol()\n        if data_transfer_address is None:\n            data_transfer_address = 'localhost:%dts_port%'\n        heartbeat_interval_ms = _get_time_or_placeholder(heartbeat_interval_ms)\n        dispatcher_timeout_ms = _get_time_or_placeholder(dispatcher_timeout_ms)\n        return super(WorkerConfig, cls).__new__(cls, dispatcher_address, worker_address, port, protocol, heartbeat_interval_ms, dispatcher_timeout_ms, data_transfer_protocol, data_transfer_address)",
    "docstring": "Configuration class for tf.data service dispatchers. Fields: dispatcher_address: Specifies the address of the dispatcher. worker_address: Specifies the address of the worker server. This address is passed to the dispatcher so that the dispatcher can tell clients how to connect to this worker. port: Specifies the port to bind to. A value of 0 indicates that the worker can bind to any available port. protocol: A string indicating the protocol to be used by the worker to connect to the dispatcher. E.g. \"grpc\". heartbeat_interval_ms: How often the worker should heartbeat to the dispatcher, in milliseconds. If not set, the runtime will select a reasonable default. A higher value will reduce the load on the dispatcher, while a lower value will reduce the time it takes to reclaim resources from finished jobs. dispatcher_timeout_ms: How long, in milliseconds, to retry requests to the dispatcher before giving up and reporting an error. Defaults to 1 hour. data_transfer_protocol: A string indicating the protocol to be used by the worker to transfer data to the client. E.g. \"grpc\". data_transfer_address: A string indicating the data transfer address of the worker server.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "ClassDef name:WorkerConfig Call FunctionDef name:__new__ arg:cls arg:dispatcher_address arg:worker_address arg:port arg:protocol arg:heartbeat_interval_ms arg:dispatcher_timeout_ms arg:data_transfer_protocol arg:data_transfer_address arguments arg arg arg arg arg arg arg arg arg If Compare Assign If Compare Assign Call If Compare Assign Assign Call Assign Call Return return:yes Call Call Call"
  },
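A minimal usage sketch for the entry above: constructing a `WorkerConfig` and handing it to `tf.data.experimental.service.WorkerServer`. The dispatcher address is a placeholder and assumes a dispatcher is already running there.

```python
import tensorflow as tf

# Hypothetical address; assumes a tf.data service dispatcher is already
# listening on localhost:5050.
config = tf.data.experimental.service.WorkerConfig(
    dispatcher_address="localhost:5050")  # port=0 -> bind any free port

# WorkerConfig is a namedtuple, so unset fields keep the defaults shown above.
worker = tf.data.experimental.service.WorkerServer(config)
worker.join()  # block until the worker shuts down
```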
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, inputs, state):\n    _check_rnn_cell_input_dtypes([inputs, state])\n    gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), self._gate_kernel)\n    gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)\n    value = math_ops.sigmoid(gate_inputs)\n    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n    r_state = r * state\n    candidate = math_ops.matmul(array_ops.concat([inputs, r_state], 1), self._candidate_kernel)\n    candidate = nn_ops.bias_add(candidate, self._candidate_bias)\n    c = self._activation(candidate)\n    new_h = u * state + (1 - u) * c\n    return (new_h, new_h)",
    "docstring": "Gated recurrent unit (GRU) with nunits cells.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arg:state arguments arg arg arg Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign Call Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Sigmoid",
    "source_code": "class Sigmoid(Module):\n\n    def forward(self, input: Tensor) -> Tensor:\n        return torch.sigmoid(input)",
    "docstring": "Applies the Sigmoid function element-wise. .. math:: \\text{Sigmoid}(x) = \\sigma(x) = \\frac{1}{1 + \\exp(-x)} Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Sigmoid.png Examples:: >>> m = nn.Sigmoid() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Sigmoid FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "generate_key",
    "source_code": "@classmethod\ndef generate_key(cls, kty, crv_or_size, options=None, is_private=False):\n    key_cls = cls.JWK_KEY_CLS[kty]\n    return key_cls.generate_key(crv_or_size, options, is_private)",
    "docstring": "Generate a Key with the given key type, curve name or bit size. :param kty: string of `` :param crv_or_size: curve name or bit size :param options: a dict of other options for Key :param is_private: create a private key or public key :return: Key instance",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\jwk.py",
    "ast_data": "FunctionDef name:generate_key arg:cls arg:kty arg:crv_or_size arg:options arg:is_private arguments arg arg arg arg arg Assign Return return:yes Call"
  },
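For context, `generate_key` here is the dispatching classmethod; a quick sketch of how it is typically reached through authlib's `JsonWebKey` facade (key parameters chosen for illustration):

```python
from authlib.jose import JsonWebKey

# kty selects the key class from JWK_KEY_CLS; crv_or_size is a bit size
# for RSA/oct keys and a curve name for EC/OKP keys.
rsa_key = JsonWebKey.generate_key("RSA", 2048, is_private=True)
ec_key = JsonWebKey.generate_key("EC", "P-256", is_private=True)

print(rsa_key.as_dict(is_private=False))  # public JWK representation
```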
  {
    "library": "tensorflow",
    "name": "TFClassMethodDispatcher",
    "source_code": "class TFClassMethodDispatcher(dispatch.OpDispatcher):\n\n    def __init__(self, cls, method_name):\n        self.cls = cls\n        self.method_name = method_name\n\n    def handle(self, args, kwargs):\n        if any((isinstance(x, keras_tensor.KerasTensor) for x in nest.flatten([args, kwargs]))):\n            return ClassMethod(self.cls, self.method_name)(args[1:], kwargs)\n        else:\n            return self.NOT_SUPPORTED",
    "docstring": "A class method dispatcher that allows building a functional model with TF class methods.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:TFClassMethodDispatcher FunctionDef name:__init__ arg:self arg:cls arg:method_name arguments arg arg arg Assign Assign FunctionDef name:handle arg:self arg:args arg:kwargs arguments arg arg arg If Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "clear",
    "source_code": "@staticmethod\ndef clear():\n    with FileWriterCache._lock:\n        for item in FileWriterCache._cache.values():\n            item.close()\n        FileWriterCache._cache = {}",
    "docstring": "Clear cached summary writers. Currently only used for unit tests.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer_cache.py",
    "ast_data": "FunctionDef name:clear arguments With For Call Call Assign"
  },
  {
    "library": "django",
    "name": "has_changed",
    "source_code": "def has_changed(self, initial, data):\n    initial_value = self.to_python(initial)\n    return super().has_changed(initial_value, data)",
    "docstring": "Return True if data differs from initial.",
    "type": "method",
    "file_path": "django\\django\\contrib\\postgres\\forms\\hstore.py",
    "ast_data": "FunctionDef name:has_changed arg:self arg:initial arg:data arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_registered_model",
    "source_code": "def get_registered_model(self, app_label, model_name):\n    model = self.all_models[app_label].get(model_name.lower())\n    if model is None:\n        raise LookupError(\"Model '%s.%s' not registered.\" % (app_label, model_name))\n    return model",
    "docstring": "Similar to get_model(), but doesn't require that an app exists with the given app_label. It's safe to call this method at import time, even while the registry is being populated.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:get_registered_model arg:self arg:app_label arg:model_name arguments arg arg arg Assign Call Call If Compare Raise Call Return return:yes"
  },
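A short illustration of the contract described above (requires a configured Django project; the lookup is case-insensitive on the model name and raises `LookupError` rather than returning `None`):

```python
from django.apps import apps

# Unlike apps.get_model(), this does not require the app registry to be
# fully populated, so it is safe to call at import time.
User = apps.get_registered_model("auth", "User")

try:
    apps.get_registered_model("auth", "nonexistent")
except LookupError as exc:
    print(exc)  # "Model 'auth.nonexistent' not registered."
```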
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    if context.executing_eagerly() and self._closed:\n        return\n    try:\n        with ops.control_dependencies([self.flush()]):\n            with ops.device('cpu:0'):\n                return gen_summary_ops.close_summary_writer(self._resource)\n    finally:\n        if context.executing_eagerly():\n            self._closed = True",
    "docstring": "See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg If BoolOp Call Return return:no Try With Call Call With Call Return return:yes Call If Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "default_tags",
    "source_code": "def default_tags(estimator) -> Tags:\n    est_is_classifier = getattr(estimator, '_estimator_type', None) == 'classifier'\n    est_is_regressor = getattr(estimator, '_estimator_type', None) == 'regressor'\n    target_required = est_is_classifier or est_is_regressor\n    return Tags(estimator_type=getattr(estimator, '_estimator_type', None), target_tags=TargetTags(required=target_required), transformer_tags=TransformerTags() if hasattr(estimator, 'transform') or hasattr(estimator, 'fit_transform') else None, classifier_tags=ClassifierTags() if est_is_classifier else None, regressor_tags=RegressorTags() if est_is_regressor else None)",
    "docstring": "Get the default tags for an estimator. This ignores any `~.sklearn.utils. TransformerTags~.sklearn.utils.ClassifierTags~.sklearn.utils.RegressorTags`. Parameters ---------- estimator : estimator object The estimator for which to get the default tags. Returns ------- tags : Tags The default tags for the estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_tags.py",
    "ast_data": "FunctionDef name:default_tags arg:estimator arguments arg Assign Compare Call Assign Compare Call Assign BoolOp Return return:yes Call Call Call BoolOp Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "fix_iota_device",
    "source_code": "@register_graph_pattern(CallFunction(torch.ops.prims.iota.default, KeywordArg('length'), start=KeywordArg('start'), step=KeywordArg('step'), dtype=KeywordArg('dtype'), device=KeywordArg('device'), requires_grad=KeywordArg('requires_grad')), pass_dict=patterns)\ndef fix_iota_device(match: Match, length, start, step, dtype, device, requires_grad):\n    node, = match.nodes\n    user_devices = OrderedSet[torch.device]()\n    for user in node.users:\n        if user.op == 'call_function' and user.target in (aten.index.Tensor, aten.index_put.default) and hasattr(user.meta.get('val'), 'device'):\n            user_devices.add(user.meta['val'].device)\n        else:\n            return\n    if len(user_devices) == 1 and 'val' in node.meta:\n        user_device, = user_devices\n        if device.type != user_device.type:\n            repl = match.graph.call_function(torch.ops.prims.iota.default, (length,), {'start': start, 'step': step, 'dtype': dtype, 'device': user_device, 'requires_grad': requires_grad})\n            repl.meta.update(node.meta)\n            repl.meta['val'] = repl.meta['val'].to(user_device)\n            node.replace_all_uses_with(repl)\n            match.erase_nodes()",
    "docstring": "Eager supports: aten.index(cuda_tensor, torch.arange(..., device=\"cpu\")) But this results in an implicit host-device-copy and breaks cudagraphs. Rewrite the arange to use CUDA.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\joint_graph.py",
    "ast_data": "FunctionDef name:fix_iota_device arg:match arg:length arg:start arg:step arg:dtype arg:device arg:requires_grad arguments arg arg arg arg arg arg arg Assign Assign Call For If BoolOp Compare Compare Call Call Call Return return:no If BoolOp Compare Call Compare Assign If Compare Assign Call Call Assign Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_corners",
    "source_code": "def get_corners(self):\n    return self.get_patch_transform().transform([(-1, -1), (1, -1), (1, 1), (-1, 1)])",
    "docstring": "Return the corners of the ellipse bounding box. The bounding box orientation is moving anti-clockwise from the lower left corner defined before rotation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_corners arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_workflows_push_tags",
    "source_code": "def get_workflows_push_tags() -> set[str]:\n    rc: set[str] = set()\n    for fname in (GITHUB_DIR / 'workflows').glob('*.yml'):\n        with fname.open('r') as f:\n            wf_yml = yaml.safe_load(f)\n        on_tag = wf_yml.get(True, None)\n        push_tag = on_tag.get('push', None) if isinstance(on_tag, dict) else None\n        tags_tag = push_tag.get('tags', None) if isinstance(push_tag, dict) else None\n        if isinstance(tags_tag, list):\n            rc.update(tags_tag)\n    return rc",
    "docstring": "Extract all known push tags from workflows",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\collect_ciflow_labels.py",
    "ast_data": "FunctionDef name:get_workflows_push_tags arguments Call For Call With Call Assign Call Assign Call Assign Call Call Assign Call Call If Call Call Return return:yes"
  },
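The `wf_yml.get(True, None)` lookup above looks odd until you recall that YAML 1.1 parses the bare key `on` as the boolean `True`. A small demonstration with PyYAML:

```python
import yaml

# GitHub workflow files use `on:` as a top-level key; PyYAML's YAML 1.1
# loader turns that key into the boolean True, not the string "on".
doc = yaml.safe_load(
    """
on:
  push:
    tags:
      - ciflow/trunk/*
"""
)
print(doc)                        # {True: {'push': {'tags': ['ciflow/trunk/*']}}}
print(doc[True]["push"]["tags"])  # ['ciflow/trunk/*']
```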
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Instantiates an initializer from a configuration dictionary. Example: Args: config: A Python dictionary. It will typically be the output of . Returns: An Initializer instance.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "deepcopy",
    "source_code": "def deepcopy(self) -> Self:\n    return deepcopy(self)",
    "docstring": "Return a :func: of this item.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\item.py",
    "ast_data": "FunctionDef name:deepcopy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "onenorm",
    "source_code": "def onenorm(self):\n    if self._A_1_norm is None:\n        self._A_1_norm = _exact_1_norm(self._A)\n    return self._scale * self._A_1_norm",
    "docstring": "Compute the exact 1-norm.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:onenorm arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "maybe_fill",
    "source_code": "def maybe_fill(arr: np.ndarray) -> np.ndarray:\n    if arr.dtype.kind not in 'iub':\n        arr.fill(np.nan)\n    return arr",
    "docstring": "Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:maybe_fill arg:arr arguments arg If Compare Call Return return:yes"
  },
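A standalone check of the dtype-kind guard used above; only kinds outside 'i' (signed int), 'u' (unsigned int) and 'b' (bool) can represent NaN, so those are the only buffers filled:

```python
import numpy as np

def maybe_fill(arr: np.ndarray) -> np.ndarray:
    # Integer and boolean arrays cannot hold NaN, so leave them untouched.
    if arr.dtype.kind not in "iub":
        arr.fill(np.nan)
    return arr

print(maybe_fill(np.empty(3, dtype="float64")))  # [nan nan nan]
print(maybe_fill(np.zeros(3, dtype="int64")))    # [0 0 0]
```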
  {
    "library": "scipy",
    "name": "_copy",
    "source_code": "def _copy(self, system):\n    self.A = system.A\n    self.B = system.B\n    self.C = system.C\n    self.D = system.D",
    "docstring": "Copy the parameters of another system. Parameters ---------- system : instance of The state-space system that is to be copied",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_copy arg:self arg:system arguments arg arg Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_get_param_names",
    "source_code": "def _get_param_names(self, return_alias):\n    return set((alias if return_alias and (not request_is_valid(alias)) else prop for prop, alias in self._requests.items() if not request_is_valid(alias) or alias is not False))",
    "docstring": "Get names of all metadata that can be consumed or routed by this method. This method returns the names of all metadata, even the ``, aliases are ignored and original names are returned. Returns ------- names : set of str A set of strings with the names of all parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_get_param_names arg:self arg:return_alias arguments arg arg Return return:yes Call BoolOp Call Call BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "eager_handle_data",
    "source_code": "@property\ndef eager_handle_data(self):\n    return _get_handle_data(self._matrix) if self._eager_mode else None",
    "docstring": "Return the matrix's handle data iff in eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:eager_handle_data arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "read",
    "source_code": "def read(self, index, name=None):\n    value = gen_data_flow_ops.tensor_array_read_v3(handle=self._handle, index=index, flow_in=self._flow, dtype=self._dtype, name=name)\n    if self._element_shape:\n        value.set_shape(self._element_shape[0].dims)\n    return value",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:read arg:self arg:index arg:name arguments arg arg arg Assign Call If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "collect",
    "source_code": "def collect(val, collections, default_collections):\n    if collections is None:\n        collections = default_collections\n    for key in collections:\n        ops.add_to_collection(key, val)",
    "docstring": "Adds keys to a collection. Args: val: The value to add per each key. collections: A collection of keys to add. default_collections: Used if collections is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_op_util.py",
    "ast_data": "FunctionDef name:collect arg:val arg:collections arg:default_collections arguments arg arg arg If Compare Assign For Call"
  },
  {
    "library": "tensorflow",
    "name": "model_from_yaml",
    "source_code": "def model_from_yaml(yaml_string, custom_objects=None):\n    raise RuntimeError('Method `model_from_yaml()` has been removed due to security risk of arbitrary code execution. Please use `Model.to_json()` and `model_from_json()` instead.')",
    "docstring": "Parses a yaml model configuration file and returns a model instance. Note: Since TF 2.6, this method is no longer supported and will raise a RuntimeError. Args: yaml_string: YAML string or open file encoding a model configuration. custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. Returns: A Keras model instance (uncompiled). Raises: RuntimeError: announces that the method poses a security risk",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\model_config.py",
    "ast_data": "FunctionDef name:model_from_yaml arg:yaml_string arg:custom_objects arguments arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "_get_window_grid_kernel3d",
    "source_code": "def _get_window_grid_kernel3d(d: int, h: int, w: int, device: Optional[torch.device]=None) -> Tensor:\n    if device is None:\n        device = torch.device('cpu')\n    grid2d = create_meshgrid(h, w, True, device=device)\n    if d > 1:\n        z = torch.linspace(-1, 1, d, device=device).view(d, 1, 1, 1)\n    else:\n        z = zeros(1, 1, 1, 1, device=device)\n    grid3d = concatenate([z.repeat(1, h, w, 1).contiguous(), grid2d.repeat(d, 1, 1, 1)], 3)\n    conv_kernel = grid3d.permute(3, 0, 1, 2).unsqueeze(1)\n    return conv_kernel",
    "docstring": "Generate a kernel to return coordinates, residual to window center. Args: d: kernel depth. h: kernel height. w: kernel width. device: device, on which generate. Returns: conv_kernel [3x1xdxhxw]",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py",
    "ast_data": "FunctionDef name:_get_window_grid_kernel3d arg:d arg:h arg:w arg:device arguments arg arg arg arg If Compare Assign Call Assign Call If Compare Assign Call Call Assign Call Assign Call Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "follow_all",
    "source_code": "def follow_all(self, urls: Iterable[str | Link] | parsel.SelectorList | None=None, callback: CallbackT | None=None, method: str='GET', headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None=None, body: bytes | str | None=None, cookies: CookiesT | None=None, meta: dict[str, Any] | None=None, encoding: str | None=None, priority: int=0, dont_filter: bool=False, errback: Callable[[Failure], Any] | None=None, cb_kwargs: dict[str, Any] | None=None, flags: list[str] | None=None, css: str | None=None, xpath: str | None=None) -> Iterable[Request]:\n    arguments = [x for x in (urls, css, xpath) if x is not None]\n    if len(arguments) != 1:\n        raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')\n    if not urls:\n        if css:\n            urls = self.css(css)\n        if xpath:\n            urls = self.xpath(xpath)\n    if isinstance(urls, parsel.SelectorList):\n        selectors = urls\n        urls = []\n        for sel in selectors:\n            with suppress(_InvalidSelector):\n                urls.append(_url_from_selector(sel))\n    return super().follow_all(urls=cast('Iterable[str | Link]', urls), callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags)",
    "docstring": "A generator that produces :class: instances to follow all links in `~.Request~scrapy.link.Linktopics-link-extractors~scrapy.Selector element, e.g. `~scrapy.Selector` attribute)",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\response\\text.py",
    "ast_data": "FunctionDef name:follow_all arg:self arg:urls arg:callback arg:method arg:headers arg:body arg:cookies arg:meta arg:encoding arg:priority arg:dont_filter arg:errback arg:cb_kwargs arg:flags arg:css arg:xpath arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Compare If Compare Call Raise Call If If Assign Call If Assign Call If Call Assign Assign For With Call Call Call Return return:yes Call Call Call"
  },
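A minimal spider sketch showing the `css` shortcut from the entry above (the selector and start URL are placeholders):

```python
import scrapy

class PagerSpider(scrapy.Spider):
    name = "pager"
    start_urls = ["https://example.com"]  # placeholder

    def parse(self, response):
        yield {"url": response.url}
        # Exactly one of urls / css / xpath may be given; here the links are
        # extracted inside follow_all() via the css argument.
        yield from response.follow_all(
            css="a.next::attr(href)", callback=self.parse)
```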
  {
    "library": "tensorflow",
    "name": "_make_device_specs",
    "source_code": "def _make_device_specs(devices: Optional[List[Union[tf_device.DeviceSpec, str]]]=None, device_type: Optional[str]=None) -> Tuple[List[tf_device.DeviceSpec], str]:\n    if devices is None:\n        if device_type is None:\n            device_type = 'CPU'\n        devices = config.local_devices(device_type)\n    else:\n        if isinstance(devices[0], str):\n            devices = [tf_device.DeviceSpec.from_string(d) for d in devices]\n        if device_type is None:\n            device_type = devices[0].device_type\n        if device_type.upper() != devices[0].device_type.upper():\n            raise ValueError(f'Conflicting devices {str(devices)} and device_type {device_type}')\n    return (devices, device_type)",
    "docstring": "Makes device specs for all local devices or from a provided list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\mesh_util.py",
    "ast_data": "FunctionDef name:_make_device_specs arg:devices arg:device_type arguments arg arg If Compare If Compare Assign Assign Call If Call Assign Call If Compare Assign If Compare Call Call Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_attrs_instance",
    "source_code": "def _is_attrs_instance(obj):\n    return getattr(obj.__class__, '__attrs_attrs__', None) is not None",
    "docstring": "Returns True if the given obj is an instance of attrs-decorated class.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:_is_attrs_instance arg:obj arguments arg Return return:yes Compare Call"
  },
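The check relies on the `__attrs_attrs__` class attribute that the `attrs` library attaches to decorated classes; a quick demonstration (requires `attrs` to be installed):

```python
import attr

@attr.s
class Point:
    x = attr.ib()
    y = attr.ib()

def _is_attrs_instance(obj):
    # attrs-decorated classes carry __attrs_attrs__; plain classes do not.
    return getattr(obj.__class__, "__attrs_attrs__", None) is not None

print(_is_attrs_instance(Point(1, 2)))  # True
print(_is_attrs_instance({"x": 1}))     # False
```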
  {
    "library": "cherrypy",
    "name": "error_file",
    "source_code": "@property\ndef error_file(self):\n    h = self._get_builtin_handler(self.error_log, 'file')\n    if h:\n        return h.baseFilename\n    return ''",
    "docstring": "The filename for self.error_log. If you set this to a string, it'll add the appropriate FileHandler for you. If you set it to ``, it will remove the handler.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:error_file arg:self arguments arg Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "setup_globally_cached",
    "source_code": "def setup_globally_cached(self, name, value):\n    name = re.sub('[^a-zA-Z0-9_]+', '_', name)\n    f_globals = self.tx.f_globals\n    if name in f_globals:\n        assert id(f_globals[name]) == id(value)\n    else:\n        f_globals[name] = value\n    return [self.create_load_global(name, add=True)]",
    "docstring": "Store value in a new global",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\codegen.py",
    "ast_data": "FunctionDef name:setup_globally_cached arg:self arg:name arg:value arguments arg arg arg Assign Call Assign If Compare Compare Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Exit",
    "source_code": "def Exit(self):\n    graph = ops.get_default_graph()\n    last_context = self._context_stack.pop()\n    graph._set_control_flow_context(last_context)",
    "docstring": "Exit this control flow context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:Exit arg:self arguments arg Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "weakref_to_str",
    "source_code": "@staticmethod\ndef weakref_to_str(obj_weakref):\n    if isinstance(obj_weakref, weakref.ReferenceType):\n        obj = obj_weakref()\n        if obj is not None:\n            return f\"<weakref at {hex(id(obj_weakref))}; to '{obj.__class__.__name__}' at {hex(id(obj))}>\"\n        else:\n            return f'<weakref at {hex(id(obj_weakref))}; dead>'\n    else:\n        return str(obj_weakref)",
    "docstring": "This is a workaround of a Python weakref bug. is instance returned by , is buggy if the original obj overrides __getattr__, e.g: class MyConfig(dict): def __getattr__(self, x): return self[x] obj = MyConfig(offset=5) obj_weakref = weakref.ref(obj) str(obj_weakref) # raise error: KeyError: '__name__'",
    "type": "method",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:weakref_to_str arg:obj_weakref arguments arg If Call Assign Call If Compare Return return:yes Call Call Call Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_updated_config",
    "source_code": "def _updated_config(self):\n    from tensorflow.python.keras import __version__ as keras_version\n    config = self.get_config()\n    model_config = {'class_name': self.__class__.__name__, 'config': config, 'keras_version': keras_version, 'backend': backend.backend()}\n    return model_config",
    "docstring": "Util shared between different serialization methods. Returns: Model config with Keras version information added.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_updated_config arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_range_prod",
    "source_code": "def _range_prod(lo, hi, k=1):\n    if lo == 1 and k == 1:\n        return math.factorial(hi)\n    if lo + k < hi:\n        mid = (hi + lo) // 2\n        if k > 1:\n            mid = mid - (mid - hi) % k\n        return _range_prod(lo, mid, k) * _range_prod(mid + k, hi, k)\n    elif lo + k == hi:\n        return lo * hi\n    else:\n        return hi",
    "docstring": "Product of a range of numbers spaced k apart (from hi). For k=1, this returns the product of lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi = hi! / (lo-1)! For k>1, it correspond to taking only every k'th number when counting down from hi - e.g. 18!!!! = _range_prod(1, 18, 4). Breaks into smaller products first for speed: _range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:_range_prod arg:lo arg:hi arg:k arguments arg arg arg If BoolOp Compare Compare Return return:yes Call If Compare Assign If Compare Assign Return return:yes Call Call If Compare Return return:yes Return return:yes"
  },
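A quick numerical check of the two cases described in the docstring, importing the private helper from the path listed above (private, so subject to change):

```python
import math
from scipy.special._basic import _range_prod  # private helper

# k=1: product of a contiguous range, i.e. hi! / (lo-1)!
assert _range_prod(2, 9) == math.factorial(9)        # 362880
assert _range_prod(3, 9) == math.factorial(9) // 2   # 9! / 2!

# k=4: every 4th number counting down from hi (the "18!!!!" example).
assert _range_prod(1, 18, 4) == 18 * 14 * 10 * 6 * 2  # 30240
print("all checks passed")
```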
  {
    "library": "seaborn",
    "name": "_hue_backcompat",
    "source_code": "def _hue_backcompat(self, color, palette, hue_order, force_hue=False):\n    default_behavior = color is None or palette is not None\n    if force_hue and 'hue' not in self.variables and default_behavior:\n        self._redundant_hue = True\n        self.plot_data['hue'] = self.plot_data[self.orient]\n        self.variables['hue'] = self.variables[self.orient]\n        self.var_types['hue'] = 'categorical'\n        hue_order = self.var_levels[self.orient]\n        if isinstance(palette, dict):\n            palette = {str(k): v for k, v in palette.items()}\n    else:\n        if 'hue' in self.variables:\n            redundant = (self.plot_data['hue'] == self.plot_data[self.orient]).all()\n        else:\n            redundant = False\n        self._redundant_hue = redundant\n    if 'hue' in self.variables and palette is None and (color is not None):\n        if not isinstance(color, str):\n            color = mpl.colors.to_hex(color)\n        palette = f'dark:{color}'\n        msg = f\"\\n\\nSetting a gradient palette using color= is deprecated and will be removed in v0.14.0. Set `palette='{palette}'` for the same effect.\\n\"\n        warnings.warn(msg, FutureWarning, stacklevel=3)\n    return (palette, hue_order)",
    "docstring": "Implement backwards compatibility for hue parametrization. Note: the force_hue parameter is used so that functions can be shown to pass existing tests during refactoring and then tested for new behavior. It can be removed after completion of the work.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_hue_backcompat arg:self arg:color arg:palette arg:hue_order arg:force_hue arguments arg arg arg arg arg Assign BoolOp Compare Compare If BoolOp Compare Assign Assign Assign Assign Assign If Call Assign Call Call If Compare Assign Call Compare Assign Assign If BoolOp Compare Compare Compare If Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "getX",
    "source_code": "def getX(self, index):\n    return self.getOrdinate(0, index)",
    "docstring": "Get the X value at the index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:getX arg:self arg:index arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "compute_last_usage",
    "source_code": "def compute_last_usage(self) -> None:\n    future_used_buffers = OrderedSet(V.graph.get_output_names())\n    for node in reversed(self.nodes):\n        node.set_last_usage(future_used_buffers, self.mutation_real_name)\n        future_used_buffers.update(node.last_usage)",
    "docstring": "Populate node.last_usage recursively (also for the nodes within a FusedSchedulerNode)",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:compute_last_usage arg:self arguments arg Assign Call Call For Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_post_plot_logic",
    "source_code": "@abstractmethod\ndef _post_plot_logic(self, ax: Axes, data) -> None:\n    pass",
    "docstring": "Post process for each axes. Overridden in child classes",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_post_plot_logic arg:self arg:ax arg:data arguments arg arg arg"
  },
  {
    "library": "kornia",
    "name": "list",
    "source_code": "@classmethod\ndef list(cls) -> list[str]:\n    return [c.name for c in cls]",
    "docstring": "Return a list of names of enumeration members. Returns: A list containing the names of enumeration members.",
    "type": "method",
    "file_path": "kornia\\kornia\\color\\colormap.py",
    "ast_data": "FunctionDef name:list arg:cls arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_reset_cache",
    "source_code": "def _reset_cache(self, key: str | None=None) -> None:\n    if not hasattr(self, '_cache'):\n        return\n    if key is None:\n        self._cache.clear()\n    else:\n        self._cache.pop(key, None)",
    "docstring": "Reset cached properties. If `` is passed, only clears that key.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\base.py",
    "ast_data": "FunctionDef name:_reset_cache arg:self arg:key arguments arg arg If Call Return return:no If Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_revalidate_path",
    "source_code": "def _revalidate_path(self):\n    if self._invalid or self._cached_vertices is None:\n        tr = Affine2D().scale(self._size / text_to_path.FONT_SCALE).translate(*self._xy)\n        self._cached_vertices = tr.transform(self._vertices)\n        self._cached_vertices.flags.writeable = False\n        self._invalid = False",
    "docstring": "Update the path if necessary. The path for the text is initially create with the font size of , and this path is rescaled to other size when necessary.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\textpath.py",
    "ast_data": "FunctionDef name:_revalidate_path arg:self arguments arg If BoolOp Compare Assign Call Call Call Assign Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "derivative",
    "source_code": "def derivative(self, nu):\n    p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)\n    for axis, n in enumerate(nu):\n        p._derivative_inplace(n, axis)\n    p._ensure_c_contiguous()\n    return p",
    "docstring": "Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the antiderivative is returned. Returns ------- pp : NdPPoly Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n]) representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals in each dimension are considered half-open, ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:derivative arg:self arg:nu arguments arg arg Assign Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "deprecate",
    "source_code": "def deprecate(*args, **kwargs):\n    warnings.warn('`deprecate` is deprecated, use `warn` with `DeprecationWarning` instead. (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    if args:\n        fn = args[0]\n        args = args[1:]\n        return _Deprecate(*args, **kwargs)(fn)\n    else:\n        return _Deprecate(*args, **kwargs)",
    "docstring": "Issues a DeprecationWarning, adds warning to 's docstring, rebinds `~warnings.warnDeprecationWarningfuncold_nameold_namenew_nameuint64` is deprecated! # may vary >>> olduint(6) 6",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_utils_impl.py",
    "ast_data": "FunctionDef name:deprecate arguments arg arg Call If Assign Assign Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_default_formatter",
    "source_code": "def _default_formatter(x: Any, precision: int, thousands: bool=False) -> Any:\n    if is_float(x) or is_complex(x):\n        return f'{x:,.{precision}f}' if thousands else f'{x:.{precision}f}'\n    elif is_integer(x):\n        return f'{x:,}' if thousands else str(x)\n    return x",
    "docstring": "Format the display of a value Parameters ---------- x : Any Input variable to be formatted precision : Int Floating point precision used if `` is float or complex. thousands : bool, default False Whether to group digits with thousands separated with \",\". Returns ------- value : Any Matches input type, or string if input is float or complex or int with sep.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_default_formatter arg:x arg:precision arg:thousands arguments arg arg arg If BoolOp Call Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
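A standalone sketch of the formatting rules above (hypothetical helper name; the real function uses pandas' `is_float`/`is_complex`/`is_integer` checks rather than `isinstance`):

```python
def fmt(x, precision, thousands=False):
    # floats/complex: fixed precision, optional thousands separator
    if isinstance(x, (float, complex)):
        return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}"
    # ints: never get decimal places, only the optional separator
    if isinstance(x, int) and not isinstance(x, bool):
        return f"{x:,}" if thousands else str(x)
    return x  # everything else passes through unchanged

print(fmt(1234.5678, 2))                  # 1234.57
print(fmt(1234.5678, 2, thousands=True))  # 1,234.57
print(fmt(1234567, 0, thousands=True))    # 1,234,567
print(fmt("already a string", 2))         # already a string
```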
  {
    "library": "tensorflow",
    "name": "register",
    "source_code": "def register(self, package, name, predicate, candidate):\n    if not isinstance(package, str) or not isinstance(name, str):\n        raise TypeError(f'The package and name registered to a {self.name} must be strings, got: package={type(package)}, name={type(name)}')\n    if not callable(predicate):\n        raise TypeError(f'The predicate registered to a {self.name} must be callable, got: {type(predicate)}')\n    registered_name = package + '.' + name\n    if not _VALID_REGISTERED_NAME.match(registered_name):\n        raise ValueError(f\"Invalid registered {self.name}. Please check that the package and name follow the regex '{_VALID_REGISTERED_NAME.pattern}': (package='{package}', name='{name}')\")\n    if registered_name in self._registered_map:\n        raise ValueError(f\"The name '{registered_name}' has already been registered to a {self.name}. Found: {self._registered_map[registered_name]}\")\n    self._registered_map[registered_name] = candidate\n    self._registered_predicates[registered_name] = predicate\n    self._registered_names.append(registered_name)",
    "docstring": "Registers a candidate object under the package, name and predicate.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\registration\\registration.py",
    "ast_data": "FunctionDef name:register arg:self arg:package arg:name arg:predicate arg:candidate arguments arg arg arg arg arg If BoolOp Call Call Raise Call Call Call If Call Raise Call Call Assign If Call Raise Call If Compare Raise Call Assign Assign Call"
  },
  {
    "library": "numpy",
    "name": "set_display",
    "source_code": "def set_display(self, s):\n    self._display = s",
    "docstring": "Set the string to print for masked values.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:set_display arg:self arg:s arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "_values_for_rank",
    "source_code": "def _values_for_rank(self) -> np.ndarray:\n    from pandas import Series\n    if self.ordered:\n        values = self.codes\n        mask = values == -1\n        if mask.any():\n            values = values.astype('float64')\n            values[mask] = np.nan\n    elif is_any_real_numeric_dtype(self.categories.dtype):\n        values = np.array(self)\n    else:\n        values = np.array(self.rename_categories(Series(self.categories, copy=False).rank().values))\n    return values",
    "docstring": "For correctly ranking ordered categorical data. See GH#15420 Ordered categorical data should be ranked on the basis of codes with -1 translated to NaN. Returns ------- numpy.array",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:_values_for_rank arg:self arguments arg If Assign Assign Compare If Call Assign Call Assign If Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_dynamic_nn_module",
    "source_code": "def is_dynamic_nn_module(obj: Any, is_export: bool) -> bool:\n    if isinstance(obj, torch.nn.Module) and ('forward' in obj.__dict__ or isinstance(obj, (dict, MutableMapping))):\n        return True\n    if hasattr(obj, 'torchdynamo_force_dynamic'):\n        return obj.torchdynamo_force_dynamic\n    if isinstance(obj, torch.nn.Module) and config.inline_inbuilt_nn_modules and (not is_export or config.install_free_tensors):\n        return True\n    if isinstance(obj, torch.nn.Module) and nn_module_has_global_hooks():\n        return True\n    dyn = GenerationTracker.dynamic_classes.get(type(obj)) or GenerationTracker.check(obj)\n    return dyn",
    "docstring": "Check for nn.Modules() created dynamically or mutated",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\mutation_guard.py",
    "ast_data": "FunctionDef name:is_dynamic_nn_module arg:obj arg:is_export arguments arg arg If BoolOp Call BoolOp Compare Call Return return:yes If Call Return return:yes If BoolOp Call BoolOp Return return:yes If BoolOp Call Call Return return:yes Assign BoolOp Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_control_flow_post_processing",
    "source_code": "def _control_flow_post_processing(self, input_tensors=None) -> None:\n    if input_tensors is None:\n        input_tensors = self.inputs\n    for input_tensor in input_tensors:\n        control_flow_util.CheckInputFromValidContext(self, input_tensor.op)\n    if self._control_flow_context is not None:\n        self._control_flow_context.AddOp(self)",
    "docstring": "Add this op to its control flow context. This may add new ops and change this op's inputs. self.inputs must be available before calling this method. Args: input_tensors: (Optional.) A list of corresponding to the inputs of this op, which should be equivalent to . Pass this argument to avoid evaluating unnecessarily.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_control_flow_post_processing arg:self arg:input_tensors arguments arg arg If Compare Assign For Call If Compare Call"
  },
  {
    "library": "sphinx",
    "name": "Todo",
    "source_code": "class Todo(SphinxAdmonition):\n    node_class = todo_node\n\n    def run(self) -> list[Node]:\n        if not self.options.get('class'):\n            self.options['class'] = ['admonition-todo']\n        todo, = super().run()\n        if not isinstance(todo, todo_node):\n            return [todo]\n        todo.insert(0, nodes.title(text=_('Todo')))\n        todo['docname'] = self.env.docname\n        self.add_name(todo)\n        self.set_source_info(todo)\n        self.state.document.note_explicit_target(todo)\n        return [todo]",
    "docstring": "A todo entry, displayed (if configured) in the form of an admonition.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\todo.py",
    "ast_data": "ClassDef name:Todo Assign FunctionDef name:run arg:self arguments arg If Call Assign Assign Call Call If Call Return return:yes Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "supports_blit",
    "source_code": "@_api.classproperty\ndef supports_blit(cls):\n    return hasattr(cls, 'copy_from_bbox') and hasattr(cls, 'restore_region')",
    "docstring": "If this Canvas sub-class supports blitting.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:supports_blit arg:cls arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_sharding_tile_shape",
    "source_code": "def get_sharding_tile_shape(sharding):\n    if sharding is None:\n        return None\n    sharding_message = xla_data_pb2.OpSharding()\n    sharding_message.ParseFromString(sharding)\n    if sharding_message.tile_assignment_dimensions:\n        return sharding_message.tile_assignment_dimensions\n    else:\n        return None",
    "docstring": "Returns the tile assignment shape for a sharded Tensor. Args: sharding: a serialized OpSharding message describing the layout of a sharded Tensor. Returns: A list, for each dimension of the sharded Tensor, of the number of shards into which it has been split. Returns None if the input indicates no tile assignments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py",
    "ast_data": "FunctionDef name:get_sharding_tile_shape arg:sharding arguments arg If Compare Return return:no Assign Call Call If Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "start_server",
    "source_code": "@tf_export('profiler.experimental.server.start', v1=[])\ndef start_server(port):\n    _pywrap_profiler.start_server(port)",
    "docstring": "Start a profiler grpc server that listens to given port. The profiler server will exit when the process finishes. The service is defined in tensorflow/core/profiler/profiler_service.proto. Args: port: port profiler server listens to. Example usage: ```python tf.profiler.experimental.server.start(6009) # do your training here.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_v2.py",
    "ast_data": "FunctionDef name:start_server arg:port arguments arg Call Call"
  },
  {
    "library": "django",
    "name": "timezone",
    "source_code": "@cached_property\ndef timezone(self):\n    if not settings.USE_TZ:\n        return None\n    elif self.settings_dict['TIME_ZONE'] is None:\n        return datetime.UTC\n    else:\n        return zoneinfo.ZoneInfo(self.settings_dict['TIME_ZONE'])",
    "docstring": "Return a tzinfo of the database connection time zone. This is only used when time zone support is enabled. When a datetime is read from the database, it is always returned in this time zone. When the database backend supports time zones, it doesn't matter which time zone Django uses, as long as aware datetimes are used everywhere. Other users connecting to the database can choose their own time zone. When the database backend doesn't support time zones, the time zone Django uses may be constrained by the requirements of other users of the database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:timezone arg:self arguments arg If Return return:no If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_restore_inputs",
    "source_code": "def update_restore_inputs(self, checkpoint_key, shape_and_slice_spec) -> tuple[List[str], List[str]]:\n    return ([checkpoint_key], [shape_and_slice_spec])",
    "docstring": "Updates the specs to restore op. Override this method if the arguments to restore op need to be updated as per the resharding required. Args: checkpoint_key: The checkpoint key as requested by the caller shape_and_slice_spec: The shape and slice spec as requested by caller Returns: Tuple of list of checkpoint_keys and specs that the restore op should fetch as per the resharding requirement. The length of checkpoint keys returned by this method will match the length of checkpoint_values that are input to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py",
    "ast_data": "FunctionDef name:update_restore_inputs arg:self arg:checkpoint_key arg:shape_and_slice_spec arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "default_local_init_op",
    "source_code": "@staticmethod\ndef default_local_init_op():\n    return control_flow_ops.group(variables.local_variables_initializer(), lookup_ops.tables_initializer(), resources.initialize_resources(resources.local_resources()))",
    "docstring": "Returns an op that groups the default local init ops. This op is used during session initialization when a Scaffold is initialized without specifying the local_init_op arg. It includes , , and also initializes local session resources. Returns: The default Scaffold local init op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "FunctionDef name:default_local_init_op arguments Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "share_memory",
    "source_code": "def share_memory(self) -> Self:\n    return self._apply(lambda t: t.share_memory_())",
    "docstring": "See :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:share_memory arg:self arguments arg Return return:yes Call arguments arg Call"
  },
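Typical use is the Hogwild-style pattern, where parameters are moved to shared memory before spawning workers; a minimal sketch:

```python
import torch
import torch.multiprocessing as mp

def train(model):
    # Gradient steps in the child mutate the shared parameter storage.
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    opt.step()

if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    model.share_memory()  # moves parameter/buffer storage to shared memory
    procs = [mp.Process(target=train, args=(model,)) for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
```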
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, storage_writer: StorageWriter, storage_reader: StorageReader, *, process_group: Optional[dist.ProcessGroup]=None, coordinator_rank: int=0, no_dist: bool=False, load_planner: Optional[LoadPlanner]=None, save_planner: Optional[SavePlanner]=None):\n    self.storage_writer = storage_writer\n    self.storage_reader = storage_reader\n    self.process_group = process_group\n    self.coordinator_rank = coordinator_rank\n    self.no_dist = no_dist\n    self.load_planner = load_planner\n    self.save_planner = save_planner",
    "docstring": "Initializes the Checkpointer instance. Args: storage_writer: Instance of StorageWrite use to perform writes. storage_reader: StorageReader used to load data from. process_group: ProcessGroup to be used for cross-rank synchronization. coordinator_rank: Rank to use to coordinate the checkpoint. rank0 is used by default. no_dist: If ``) loader_planner: Instance of LoadPlanner to use when loading. save_planner: Instance of SavePlanner to use when saving.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_checkpointer.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:storage_writer arg:storage_reader arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "_is_matching_generic_foreign_key",
    "source_code": "def _is_matching_generic_foreign_key(self, field):\n    return isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and (field.fk_field == self.object_id_field_name)",
    "docstring": "Return True if field is a GenericForeignKey whose content type and object id fields correspond to the equivalent attributes on this GenericRelation.",
    "type": "method",
    "file_path": "django\\django\\contrib\\contenttypes\\fields.py",
    "ast_data": "FunctionDef name:_is_matching_generic_foreign_key arg:self arg:field arguments arg arg Return return:yes BoolOp Call Compare Compare"
  },
  {
    "library": "scikit-learn",
    "name": "_predict_proba_lr",
    "source_code": "def _predict_proba_lr(self, X):\n    prob = self.decision_function(X)\n    expit(prob, out=prob)\n    if prob.ndim == 1:\n        return np.vstack([1 - prob, prob]).T\n    else:\n        prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))\n        return prob",
    "docstring": "Probability estimation for OvR logistic regression. Positive class probabilities are computed as 1. / (1. + np.exp(-self.decision_function(X))); multiclass is handled by normalizing that over all classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:_predict_proba_lr arg:self arg:X arguments arg arg Assign Call Call If Compare Return return:yes Call Call Call Return return:yes"
  },
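The binary branch in numbers: the sigmoid of the decision score gives P(class 1), and the two-column stack makes each row sum to one. A small sketch using the same operations:

```python
import numpy as np
from scipy.special import expit

scores = np.array([-1.5, 0.0, 2.0])  # decision_function output (binary case)
p1 = expit(scores)                   # 1 / (1 + exp(-score)) = P(class 1)
proba = np.vstack([1 - p1, p1]).T    # columns: [P(class 0), P(class 1)]

print(proba.round(3))
print(proba.sum(axis=1))             # [1. 1. 1.]
```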
  {
    "library": "scipy",
    "name": "array_from_header",
    "source_code": "def array_from_header(self, header):\n    pass",
    "docstring": "Reads array given header",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:array_from_header arg:self arg:header arguments arg arg"
  },
  {
    "library": "scrapy",
    "name": "syntax",
    "source_code": "def syntax(self) -> str:\n    return ''",
    "docstring": "Command syntax (preferably one-line). Do not include command name.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\__init__.py",
    "ast_data": "FunctionDef name:syntax arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "validators",
    "source_code": "@cached_property\ndef validators(self):\n    return [*self.default_validators, *self._validators]",
    "docstring": "Some validators can't be created at field initialization time. This method provides a way to delay their creation until required.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:validators arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_nanquantile_1d",
    "source_code": "def _nanquantile_1d(arr1d, q, overwrite_input=False, method='linear', weights=None):\n    arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, second_arr1d=weights, overwrite_input=overwrite_input)\n    if arr1d.size == 0:\n        return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]\n    return fnb._quantile_unchecked(arr1d, q, overwrite_input=overwrite_input, method=method, weights=weights)",
    "docstring": "Private function for rank 1 arrays. Compute quantile ignoring NaNs. See nanpercentile for parameter usage",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_nanquantile_1d arg:arr1d arg:q arg:overwrite_input arg:method arg:weights arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call"
  },
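The public entry point for this helper is `np.nanquantile`; a short comparison showing why the NaN-stripped path matters:

```python
import numpy as np

a = np.array([1.0, np.nan, 3.0, 4.0])
print(np.nanquantile(a, 0.5))  # 3.0 -- NaNs removed before the quantile
print(np.quantile(a, 0.5))     # nan -- plain quantile propagates NaN
```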
  {
    "library": "pytorch",
    "name": "SubprocessHandler",
    "source_code": "class SubprocessHandler:\n\n    def __init__(self, entrypoint: str, args: tuple, env: dict[str, str], stdout: Optional[str], stderr: Optional[str], local_rank_id: int):\n        self._stdout = open(stdout, 'w') if stdout else None\n        self._stderr = open(stderr, 'w') if stderr else None\n        env_vars = os.environ.copy()\n        env_vars.update(env)\n        args_str = (entrypoint, *[str(e) for e in args])\n        self.local_rank_id = local_rank_id\n        self.proc: subprocess.Popen = self._popen(args_str, env_vars)\n\n    def _popen(self, args: tuple, env: dict[str, str]) -> subprocess.Popen:\n        kwargs: dict[str, Any] = {}\n        if not IS_WINDOWS:\n            kwargs['start_new_session'] = True\n        return subprocess.Popen(args=args, env=env, stdout=self._stdout, stderr=self._stderr, **kwargs)\n\n    def close(self, death_sig: Optional[signal.Signals]=None) -> None:\n        if not death_sig:\n            death_sig = _get_default_signal()\n        if IS_WINDOWS:\n            self.proc.send_signal(death_sig)\n        else:\n            os.killpg(self.proc.pid, death_sig)\n        if self._stdout:\n            self._stdout.close()\n        if self._stderr:\n            self._stderr.close()",
    "docstring": "Convenience wrapper around python's ``. Keeps track of meta-objects associated to the process (e.g. stdout and stderr redirect fds).",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\subprocess_handler\\subprocess_handler.py",
    "ast_data": "ClassDef name:SubprocessHandler FunctionDef name:__init__ arg:self arg:entrypoint arg:args arg:env arg:stdout arg:stderr arg:local_rank_id arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call FunctionDef name:_popen arg:self arg:args arg:env arguments arg arg arg If Assign Return return:yes Call FunctionDef name:close arg:self arg:death_sig arguments arg arg If Assign Call If Call Call If Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "is_resource_variable",
    "source_code": "@tf_export('__internal__.ops.is_resource_variable', v1=[])\ndef is_resource_variable(var):\n    return isinstance(var, BaseResourceVariable) or hasattr(var, '_should_act_as_resource_variable')",
    "docstring": "\"Returns True if is to be considered a ResourceVariable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:is_resource_variable arg:var arguments arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> Shape:\n    return (len(self),)",
    "docstring": "Return a tuple of the array dimensions. See Also -------- numpy.ndarray.shape : Similar attribute which returns the shape of an array. DataFrame.shape : Return a tuple representing the dimensionality of the DataFrame. Series.shape : Return a tuple representing the dimensionality of the Series. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.shape (3,)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "embed_check_nonnegative_integer_form",
    "source_code": "def embed_check_nonnegative_integer_form(x, name='embed_check_nonnegative_integer_form'):\n    with ops.name_scope(name, values=[x]):\n        x = ops.convert_to_tensor(x, name='x')\n        assertions = [check_ops.assert_non_negative(x, message=\"'{}' must be non-negative.\".format(x))]\n        if not x.dtype.is_integer:\n            assertions += [assert_integer_form(x, message=\"'{}' cannot contain fractional components.\".format(x))]\n        return control_flow_ops.with_dependencies(assertions, x)",
    "docstring": "Assert x is a non-negative tensor, and optionally of integers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:embed_check_nonnegative_integer_form arg:x arg:name arguments arg arg With Call Assign Call Assign Call Call If Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "update_if_finite_grads",
    "source_code": "def update_if_finite_grads():\n\n    def incr_loss_scale():\n        new_loss_scale = self.current_loss_scale * self.multiplier\n        return control_flow_ops.group(_assign_if_finite(self.current_loss_scale, new_loss_scale), self.counter.assign(0))\n    return cond.cond(self.counter + 1 >= self.growth_steps, incr_loss_scale, lambda: _op_in_graph_mode(self.counter.assign_add(1)))",
    "docstring": "Update assuming the gradients are finite.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:update_if_finite_grads arguments FunctionDef name:incr_loss_scale arguments Assign Return return:yes Call Call Call Return return:yes Call Compare arguments Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_intermediates_match_xla",
    "source_code": "def _make_intermediates_match_xla(branch_graphs, branch_intermediates):\n    new_branch_intermediates = []\n    for i, branch_graph in enumerate(branch_graphs):\n        other_fakeparams = _create_fakeparams(branch_graph, sum((bi for bi in branch_intermediates if bi is not branch_intermediates[i]), []))\n        num_preceding = sum((len(bi) for bi in branch_intermediates[:i]))\n        new_branch_intermediates.append(other_fakeparams[:num_preceding] + branch_intermediates[i] + other_fakeparams[num_preceding:])\n    return new_branch_intermediates",
    "docstring": "Like _make_intermediates_match but for the XLA case.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_make_intermediates_match_xla arg:branch_graphs arg:branch_intermediates arguments arg arg Assign For Call Assign Call Call Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_artist",
    "source_code": "def add_artist(self, a):\n    a.axes = self\n    self._children.append(a)\n    a._remove_method = self._children.remove\n    self._set_artist_props(a)\n    if a.get_clip_path() is None:\n        a.set_clip_path(self.patch)\n    self.stale = True\n    return a",
    "docstring": "Add an to the Axes; return the artist. Use only for artists for which there is no dedicated \"add\" method; and if necessary, use a method such as to manually update the if the artist is to be included in autoscaling. If no ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:add_artist arg:self arg:a arguments arg arg Assign Call Assign Call If Compare Call Call Assign Return return:yes"
  },
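A short sketch of a typical `add_artist` use: `AnchoredText` has no dedicated `ax.add_*` method, so it goes through this generic path. The headless backend and output filename are our own choices.

```python
# Sketch: add an artist that lacks a dedicated "add" method.
import matplotlib
matplotlib.use("Agg")  # headless backend for the sketch
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText

fig, ax = plt.subplots()
returned = ax.add_artist(AnchoredText("demo label", loc="upper left"))
print(returned.axes is ax)  # True: the Axes now owns the artist
fig.savefig("demo.png")
```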
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    feature_names_out = input_features\n    for _, name, transform in self._iter():\n        if not hasattr(transform, 'get_feature_names_out'):\n            raise AttributeError('Estimator {} does not provide get_feature_names_out. Did you mean to call pipeline[:-1].get_feature_names_out()?'.format(name))\n        feature_names_out = transform.get_feature_names_out(feature_names_out)\n    return feature_names_out",
    "docstring": "Get output feature names for transformation. Transform input features using the pipeline. Parameters ---------- input_features : array-like of str or None, default=None Input features. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Assign For Call If Call Raise Call Call Assign Call Return return:yes"
  },
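A runnable sketch of the name-chaining behavior described above: each step's `get_feature_names_out` receives the previous step's output names, so a renaming step like PCA determines the final names. The data and step names are ours.

```python
# Sketch: feature-name propagation through a fitted Pipeline.
import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X = np.random.default_rng(0).normal(size=(10, 4))
pipe = Pipeline([("scale", StandardScaler()), ("pca", PCA(n_components=2))])
pipe.fit(X)
print(pipe.get_feature_names_out())  # e.g. ['pca0' 'pca1']
```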
  {
    "library": "django",
    "name": "get_full_name",
    "source_code": "def get_full_name(self):\n    full_name = '%s %s' % (self.first_name, self.last_name)\n    return full_name.strip()",
    "docstring": "Return the first_name plus the last_name, with a space in between.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\models.py",
    "ast_data": "FunctionDef name:get_full_name arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_CreateDenseMaskAndBegin",
    "source_code": "def _CreateDenseMaskAndBegin(sizes, concat_dim):\n    shape_of_shape = array_ops.shape(sizes[0])\n    mask = array_ops.concat([array_ops.zeros(array_ops.expand_dims(concat_dim, 0), dtype=dtypes.int32), [1], array_ops.zeros(shape_of_shape - concat_dim - 1, dtype=dtypes.int32)], 0)\n    begin = array_ops.zeros(shape_of_shape, dtype=dtypes.int32)\n    return (mask, begin)",
    "docstring": "Create variables for iteratively slicing a dense gradients tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_CreateDenseMaskAndBegin arg:sizes arg:concat_dim arguments arg arg Assign Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "frozencopy",
    "source_code": "def frozencopy(self) -> Self:\n    copy = self.copy()\n    copy.freeze()\n    return copy",
    "docstring": "Return an immutable copy of the current settings. Alias for a :meth: call in the object returned by :meth:.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:frozencopy arg:self arguments arg Assign Call Call Return return:yes"
  },
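A small sketch of the copy-then-freeze behavior: the frozen copy rejects mutation while the original stays writable. The setting name is an arbitrary example.

```python
# Sketch: frozencopy() returns an immutable snapshot of the settings.
from scrapy.settings import Settings

settings = Settings({"BOT_NAME": "demo"})
frozen = settings.frozencopy()
print(frozen.frozen)             # True
settings.set("BOT_NAME", "ok")   # the original is still mutable
try:
    frozen.set("BOT_NAME", "nope")
except TypeError as exc:
    print(exc)                   # frozen settings cannot be modified
```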
  {
    "library": "pytorch",
    "name": "compile",
    "source_code": "def compile(gm: torch.fx.GraphModule, example_inputs: list[InputType], options: Optional[dict[str, Any]]=None):\n    from .compile_fx import compile_fx\n    return compile_fx(gm, example_inputs, config_patches=options)",
    "docstring": "Compile a given FX graph with TorchInductor. This allows compiling FX graphs captured without using TorchDynamo. Args: gm: The FX graph to compile. example_inputs: List of tensor inputs. options: Optional dict of config options. See . Returns: Callable with same behavior as gm but faster.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\__init__.py",
    "ast_data": "FunctionDef name:compile arg:gm arg:example_inputs arg:options arguments arg arg arg Return return:yes Call"
  },
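A hedged sketch of the docstring's "FX graphs captured without TorchDynamo" case (the function `f` and shapes are ours). It requires a working inductor toolchain (C++ compiler, and Triton for GPU targets); the call convention of the result follows the docstring's "same behavior as gm" contract.

```python
# Sketch: compile an FX graph obtained via symbolic_trace, no TorchDynamo.
import torch
from torch._inductor import compile as inductor_compile
from torch.fx import symbolic_trace

def f(x):
    return torch.relu(x) + 1.0

gm = symbolic_trace(f)                             # a plain torch.fx.GraphModule
compiled = inductor_compile(gm, [torch.randn(8)])  # example inputs drive tracing
print(compiled(torch.randn(8)))                    # behaves like gm, but compiled
```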
  {
    "library": "pytorch",
    "name": "default_eval_fn",
    "source_code": "def default_eval_fn(model, calib_data):\n    for data, _target in calib_data:\n        model(data)",
    "docstring": "Define the default evaluation function. Default evaluation function takes a torch.utils.data.Dataset or a list of input Tensors and run the model on the dataset",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\__init__.py",
    "ast_data": "FunctionDef name:default_eval_fn arg:model arg:calib_data arguments arg arg For Call"
  },
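A minimal sketch of the calibration loop above: targets are ignored, only forward passes run. The model and data are placeholders of ours.

```python
# Sketch: default_eval_fn just runs the model over (data, target) pairs.
import torch
from torch.ao.quantization import default_eval_fn

model = torch.nn.Linear(4, 2)
calib_data = [(torch.randn(1, 4), None) for _ in range(3)]
default_eval_fn(model, calib_data)  # forward passes only; no loss, no backward
```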
  {
    "library": "pytorch",
    "name": "try_match_insignificant_strides",
    "source_code": "def try_match_insignificant_strides(tensor: Union[TensorBox, BaseView], strides: Sequence[Union[int, torch.SymInt]]) -> Union[TensorBox, BaseView]:\n    if not is_storage_and_layout(tensor):\n        return tensor\n    if all((V.graph.sizevars.statically_known_equals(s1, s2) for s1, s2 in zip(strides, tensor.get_stride()))):\n        return tensor\n    if not significant_strides_equal(strides, tensor.get_stride(), tensor.get_size()):\n        return tensor\n    storage, old_layout = as_storage_and_layout(tensor)\n    new_stride = [*old_layout.stride]\n    for i, s in enumerate(tensor.get_size()):\n        if V.graph.sizevars.statically_known_leq(s, 1):\n            new_stride[i] = strides[i]\n    new_layout = FixedLayout(old_layout.device, old_layout.dtype, old_layout.size, new_stride, old_layout.offset)\n    return TensorBox(ReinterpretView(data=storage, layout=new_layout))",
    "docstring": "Tries to match the strides of the tensor to those in the meta_strides. Strides of insignificant dimensions - size 0 or 1 - will be updated. If there are real stride differences (NHWC vs NCHW), or the tensor is not realized, then the input will be returned",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:try_match_insignificant_strides arg:tensor arg:strides arguments arg arg If Call Return return:yes If Call Call Call Call Return return:yes If Call Call Call Return return:yes Assign Call Assign For Call Call If Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "write_array_empty",
    "source_code": "def write_array_empty(self, key: str, value: ArrayLike) -> None:\n    arr = np.empty((1,) * value.ndim)\n    self._handle.create_array(self.group, key, arr)\n    node = getattr(self.group, key)\n    node._v_attrs.value_type = str(value.dtype)\n    node._v_attrs.shape = value.shape",
    "docstring": "write a 0-len array",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:write_array_empty arg:self arg:key arg:value arguments arg arg arg Assign Call Call Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "IndexLookupLayerSavedModelSaver",
    "source_code": "class IndexLookupLayerSavedModelSaver(LayerSavedModelSaver):\n\n    @property\n    def python_properties(self):\n        metadata = self._python_properties_internal()\n        if metadata['config'].get('has_static_table', False):\n            metadata['config']['vocabulary'] = None\n        return metadata",
    "docstring": "Index lookup layer serialization.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\layer_serialization.py",
    "ast_data": "ClassDef name:IndexLookupLayerSavedModelSaver FunctionDef name:python_properties arg:self arguments arg Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "handle_SIGHUP",
    "source_code": "def handle_SIGHUP(self):\n    if self._is_daemonized():\n        self.bus.log('SIGHUP caught while daemonized. Restarting.')\n        self.bus.restart()\n    else:\n        self.bus.log('SIGHUP caught but not daemonized. Exiting.')\n        self.bus.exit()",
    "docstring": "Restart if daemonized, else exit.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:handle_SIGHUP arg:self arguments arg If Call Call Call Call Call"
  },
  {
    "library": "django",
    "name": "layer_count",
    "source_code": "@property\ndef layer_count(self):\n    return capi.get_layer_count(self._ptr)",
    "docstring": "Return the number of layers in the data source.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\datasource.py",
    "ast_data": "FunctionDef name:layer_count arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "multilabel_soft_margin_loss",
    "source_code": "def multilabel_soft_margin_loss(input: Tensor, target: Tensor, weight: Optional[Tensor]=None, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target, weight):\n        return handle_torch_function(multilabel_soft_margin_loss, (input, target, weight), input, target, weight=weight, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction = _Reduction.legacy_get_string(size_average, reduce)\n    loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input))\n    if weight is not None:\n        loss = loss * weight\n    class_dim = input.dim() - 1\n    C = input.size(class_dim)\n    loss = loss.sum(dim=class_dim) / C\n    if reduction == 'none':\n        ret = loss\n    elif reduction == 'mean':\n        ret = loss.mean()\n    elif reduction == 'sum':\n        ret = loss.sum()\n    else:\n        ret = input\n        raise ValueError(reduction + ' is not valid')\n    return ret",
    "docstring": "Compute the multilabel soft margin loss. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. Returns: Tensor: Mutilabel soft margin loss.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:multilabel_soft_margin_loss arg:input arg:target arg:weight arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Call If Compare Assign Assign Call Assign Call Assign Call If Compare Assign If Compare Assign Call If Compare Assign Call Assign Raise Call Return return:yes"
  },
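A runnable sketch of the loss: multi-label targets are independent 0/1 indicators per class, the class dimension is averaged inside the function, and `reduction` then acts over samples. Shapes are arbitrary examples.

```python
# Sketch: per-sample multilabel soft margin loss over 5 independent labels.
import torch
import torch.nn.functional as F

logits = torch.randn(3, 5)                    # 3 samples, 5 labels
target = torch.randint(0, 2, (3, 5)).float()  # each label independently on/off
print(F.multilabel_soft_margin_loss(logits, target))  # scalar ('mean' default)
per_sample = F.multilabel_soft_margin_loss(logits, target, reduction="none")
print(per_sample.shape)                       # torch.Size([3]): class dim already averaged
```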
  {
    "library": "authlib",
    "name": "validate_token_request",
    "source_code": "def validate_token_request(self):\n    client = self.authenticate_token_endpoint_client()\n    log.debug('Validate token request of %r', client)\n    if not client.check_grant_type(self.GRANT_TYPE):\n        raise UnauthorizedClientError(f\"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'\")\n    self.request.client = client\n    self.validate_requested_scope()",
    "docstring": "The client makes a request to the token endpoint by adding the following parameters using the \"application/x-www-form-urlencoded\" format per Appendix B with a character encoding of UTF-8 in the HTTP request entity-body: grant_type REQUIRED. Value MUST be set to \"client_credentials\". scope OPTIONAL. The scope of the access request as described by Section 3.3. The client MUST authenticate with the authorization server as described in Section 3.2.1. For example, the client makes the following HTTP request using transport-layer security (with extra line breaks for display purposes only): .. code-block:: http POST /token HTTP/1.1 Host: server.example.com Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW Content-Type: application/x-www-form-urlencoded grant_type=client_credentials The authorization server MUST authenticate the client.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\client_credentials.py",
    "ast_data": "FunctionDef name:validate_token_request arg:self arguments arg Assign Call Call If Call Raise Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "fix_image_flip_shape",
    "source_code": "def fix_image_flip_shape(image, result):\n    image_shape = image.get_shape()\n    if image_shape == tensor_shape.unknown_shape():\n        result.set_shape([None, None, None])\n    else:\n        result.set_shape(image_shape)\n    return result",
    "docstring": "Set the shape to 3 dimensional if we don't know anything else. Args: image: original image size result: flipped or transformed image Returns: An image whose shape is at least (None, None, None).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:fix_image_flip_shape arg:image arg:result arguments arg arg Assign Call If Compare Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_increase_ragged_rank_to",
    "source_code": "def _increase_ragged_rank_to(rt_input, ragged_rank, row_splits_dtype):\n    if ragged_rank > 0:\n        if not ragged_tensor.is_ragged(rt_input):\n            rt_input = ragged_tensor.RaggedTensor.from_tensor(rt_input, row_splits_dtype=row_splits_dtype)\n        if rt_input.ragged_rank < ragged_rank:\n            rt_input = rt_input.with_values(_increase_ragged_rank_to(rt_input.values, ragged_rank - 1, row_splits_dtype))\n    return rt_input",
    "docstring": "Adds ragged dimensions to so it has the desired ragged rank.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_concat_ops.py",
    "ast_data": "FunctionDef name:_increase_ragged_rank_to arg:rt_input arg:ragged_rank arg:row_splits_dtype arguments arg arg arg If Compare If Call Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_RsqrtGradGrad",
    "source_code": "@ops.RegisterGradient('RsqrtGrad')\ndef _RsqrtGradGrad(op: ops.Operation, grad):\n    a = op.inputs[0]\n    b = op.inputs[1]\n    with ops.control_dependencies([grad]):\n        ca = math_ops.conj(a)\n        cg = math_ops.conj(grad)\n        grad_a = -1.5 * cg * b * math_ops.square(ca)\n        grad_b = gen_math_ops.rsqrt_grad(ca, grad)\n        return (grad_a, grad_b)",
    "docstring": "Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_RsqrtGradGrad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_path",
    "source_code": "def get_path(self):\n    _path, fillable = self._get_path_in_displaycoord()\n    if np.iterable(fillable):\n        _path = Path.make_compound_path(*_path)\n    return self.get_transform().inverted().transform_path(_path)",
    "docstring": "Return the path of the arrow in the data coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_path arg:self arguments arg Assign Call If Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "virtualenv",
    "name": "CPythonPosix",
    "source_code": "class CPythonPosix(CPython, PosixSupports, ABC):\n\n    @classmethod\n    def _executables(cls, interpreter):\n        host_exe = Path(interpreter.system_executable)\n        major, minor = (interpreter.version_info.major, interpreter.version_info.minor)\n        targets = OrderedDict(((i, None) for i in ['python', f'python{major}', f'python{major}.{minor}', host_exe.name]))\n        yield (host_exe, list(targets.keys()), RefMust.NA, RefWhen.ANY)",
    "docstring": "Create a CPython virtual environment on POSIX platforms.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\cpython\\common.py",
    "ast_data": "ClassDef name:CPythonPosix FunctionDef name:_executables arg:cls arg:interpreter arguments arg arg Assign Call Assign Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_collect_local_shard",
    "source_code": "def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:\n\n    def hook_func(_module, _input, output):\n        if isinstance(output, ShardedTensor):\n            local_tensor = output.local_tensor()\n            sharding_spec = output._sharding_spec\n            if isinstance(sharding_spec, ChunkShardingSpec) and local_tensor.size(sharding_spec.dim) == 1:\n                local_tensor = local_tensor.squeeze(output._sharding_spec.dim)\n            return local_tensor\n    module.register_forward_hook(hook_func)\n    return module",
    "docstring": "Hook a module with local shards collection in the forward pass. This API is typically used to convert a sharded representation back to data parallel representation. In particular, it returns the local tensor for this Shard. If the size along the sharding dimension for the local tensor is 1, this dimension is removed from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically a local Tensor of size [16] across each rank and not [1, 16] across each rank. Args: module (:class:): Module whose output is ShardedTensor and the local tensor value needs to be returned. Returns: A :class: object with collection API hooked.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\api.py",
    "ast_data": "FunctionDef name:_collect_local_shard arg:module arguments arg FunctionDef name:hook_func arg:_module arg:_input arg:output arguments arg arg arg If Call Assign Call Assign If BoolOp Call Compare Call Assign Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_initialize_for_tpu_strategy",
    "source_code": "def _initialize_for_tpu_strategy(self):\n    self._is_chief = True\n    self._poll_termination_signal_thread = None\n    self._cluster_wise_termination_watcher_thread = None\n    self._maybe_create_checkpoint_manager()\n    self._read_checkpoint_manager.restore_or_initialize()\n    self._run_counter = 0",
    "docstring": "Makes configurations for using the handler with TPUStrategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_initialize_for_tpu_strategy arg:self arguments arg Assign Assign Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "is_concrete_bool",
    "source_code": "def is_concrete_bool(a: BoolLikeType) -> bool:\n    assert isinstance(a, (SymBool, bool))\n    if isinstance(a, bool):\n        return True\n    if isinstance(a.node.expr, (sympy.logic.boolalg.BooleanTrue, sympy.logic.boolalg.BooleanFalse)):\n        return True\n    return False",
    "docstring": "Utility to check if underlying object in SymBool is concrete value. Also returns true if integer is passed in. Args: a (SymBool or bool): Object to test if it bool",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:is_concrete_bool arg:a arguments arg Call If Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "DynamicDimConstraintPrinter",
    "source_code": "class DynamicDimConstraintPrinter(PythonPrinter):\n\n    def __init__(self, symbol_to_source: dict[sympy.Symbol, list[Source]], source_name_to_debug_name: Mapping[str, str]):\n        super().__init__()\n        self.symbol_to_source = symbol_to_source\n        self.source_name_to_debug_name = source_name_to_debug_name\n\n    def _print_Symbol(self, expr: sympy.Symbol) -> str:\n        assert isinstance(expr, sympy.Symbol), str(type(expr))\n        assert self.symbol_to_source.get(expr), f'Unknown symbol {expr} created by constraints solver'\n        return self.symbol_to_source[expr][0].name()",
    "docstring": "Printer for dynamic dim constraints. - Instead of symbol s_k it prints its source t.size()[i] - Instead of Eq(_, _), Mod(_, _), etc. it prints _ == _, _ % _, etc. We use this to suggest code for specifying dynamic dim constraints.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:DynamicDimConstraintPrinter FunctionDef name:__init__ arg:self arg:symbol_to_source arg:source_name_to_debug_name arguments arg arg arg Call Call Assign Assign FunctionDef name:_print_Symbol arg:self arg:expr arguments arg arg Call Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__le__",
    "source_code": "def __le__(self, other):\n    return less_equal(self, other)",
    "docstring": "Return (self <= other) element-wise. See Also -------- less_equal",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__le__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "alias_tensors",
    "source_code": "def alias_tensors(*args):\n\n    def alias_if_tensor(a):\n        return array_ops.identity(a) if isinstance(a, tensor.Tensor) else a\n    if len(args) > 1:\n        return (alias_if_tensor(a) for a in args)\n    elif len(args) == 1:\n        return alias_if_tensor(args[0])\n    raise ValueError('at least one argument required')",
    "docstring": "Wraps any Tensor arguments with an identity op. Any other argument, including Variables, is returned unchanged. Args: *args: Any arguments. Must contain at least one element. Returns: Same as *args, with Tensor instances replaced as described. Raises: ValueError: If args doesn't meet the requirements.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\misc.py",
    "ast_data": "FunctionDef name:alias_tensors arguments arg FunctionDef name:alias_if_tensor arg:a arguments arg Return return:yes Call Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call Raise Call"
  },
  {
    "library": "kornia",
    "name": "sample_ray_points",
    "source_code": "def sample_ray_points(origins: Tensor, directions: Tensor, lengths: Tensor) -> Tensor:\n    points_3d = origins[..., None, :] + lengths[..., None] * directions[..., None, :]\n    return points_3d",
    "docstring": "Sample points along ray. Args: origins: tensor containing ray origins in 3d world coordinates. Tensor shape :math:. directions: tensor containing ray directions in 3d world coordinates. Tensor shape :math:. lengths: tensor containing sampled distances along each ray. Tensor shape :math:. Returns: points_3d: Points along rays :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:sample_ray_points arg:origins arg:directions arg:lengths arguments arg arg arg Assign Return return:yes"
  },
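A shape sketch of the broadcasting in `sample_ray_points` (import path taken from the entry's `file_path`; the ray values are ours): `origins[..., None, :]` and `directions[..., None, :]` broadcast against `lengths[..., None]`.

```python
# Sketch: 2 rays, 5 samples each -> points of shape (2, 5, 3).
import torch
from kornia.nerf.samplers import sample_ray_points

origins = torch.zeros(2, 3)                         # 2 rays from the origin
directions = torch.tensor([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
lengths = torch.linspace(0.1, 1.0, 5).expand(2, 5)  # 5 distances per ray
points = sample_ray_points(origins, directions, lengths)
print(points.shape)                                 # torch.Size([2, 5, 3])
```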
  {
    "library": "pytorch",
    "name": "reorder_compute_for_overlap",
    "source_code": "def reorder_compute_for_overlap(snodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n    return _schedule_for_comm(snodes, raise_comms=True, sink_waits=True, reorder_for_overlap=True)",
    "docstring": "This achieves the following overall scheduling procedure: Step 1: Given that we've currently scheduled comm N, we now schedule all compute nodes that are required for comm N + 1 but do not depend on comm N, to run at the same time with comm N. Step 2: If all those compute nodes are sufficient to overlap comm N, we're done. Otherwise, we now need to look elsewhere to find compute that overlaps with comm N. We prioritize compute nodes that are needed sooner. Step 3: We schedule the compute nodes dependent on comm N and required for comm N + 1. Step 4: We schedule comm N + 1. Repeat this for subsequent comm nodes.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:reorder_compute_for_overlap arg:snodes arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "russellrao",
    "source_code": "def russellrao(u, v, w=None):\n    u = _validate_vector(u)\n    v = _validate_vector(v)\n    if u.dtype == v.dtype == bool and w is None:\n        ntt = (u & v).sum()\n        n = float(len(u))\n    elif w is None:\n        ntt = (u * v).sum()\n        n = float(len(u))\n    else:\n        w = _validate_weights(w)\n        ntt = (u * v * w).sum()\n        n = w.sum()\n    return float(n - ntt) / n",
    "docstring": "Compute the Russell-Rao dissimilarity between two boolean 1-D arrays. The Russell-Rao dissimilarity between two boolean 1-D arrays, and , is defined as .. math:: \\frac{n - c_{TT}} {n} where :math: is the number of occurrences of :math: and :math: for :math:`k >> from scipy.spatial import distance >>> distance.russellrao([1, 0, 0], [0, 1, 0]) 1.0 >>> distance.russellrao([1, 0, 0], [1, 1, 0]) 0.6666666666666666 >>> distance.russellrao([1, 0, 0], [2, 0, 0]) 0.3333333333333333",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:russellrao arg:u arg:v arg:w arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign Call Call If Compare Assign Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_increase_rank_to",
    "source_code": "def _increase_rank_to(t, rank):\n    if isinstance(t, ragged_tensor.RaggedTensor):\n        return t.with_values(_increase_rank_to(t, rank - 1))\n    else:\n        old_dims = array_ops.shape(t)\n        new_dims = array_ops.ones([rank - array_ops.rank(t)], old_dims.dtype)\n        new_shape = array_ops.concat([old_dims, new_dims], axis=0)\n        return array_ops.reshape(t, new_shape)",
    "docstring": "Adds *trailing* size-1 dimensions to until it has the given rank.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_gather_ops.py",
    "ast_data": "FunctionDef name:_increase_rank_to arg:t arg:rank arguments arg arg If Call Return return:yes Call Call Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_format_time_share",
    "source_code": "def _format_time_share(time_us, total_time_us):\n    if total_time_us == 0:\n        assert time_us == 0, f'Expected time_us == 0 but got {time_us}'\n        return 'NaN'\n    return f'{time_us * 100.0 / total_time_us:.2f}%'",
    "docstring": "Define how to format time in FunctionEvent.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:_format_time_share arg:time_us arg:total_time_us arguments arg arg If Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "check_per_example_loss_rank",
    "source_code": "@tf_contextlib.contextmanager\ndef check_per_example_loss_rank(per_example_loss):\n    loss_rank = per_example_loss.shape.rank\n    if loss_rank is not None:\n        if loss_rank == 0:\n            raise ValueError(f'Invalid value passed for `per_example_loss`. Expected a tensor with at least rank 1. Received per_example_loss={per_example_loss} with rank {loss_rank}')\n        yield\n    else:\n        with ops.control_dependencies([check_ops.assert_greater_equal(array_ops.rank(per_example_loss), math_ops.cast(1, dtype=dtypes.int32), message='Invalid value passed for `per_example_loss`. Expected a tensor with at least rank 1.')]):\n            yield",
    "docstring": "Context manager that checks that the rank of per_example_loss is at least 1. Args: per_example_loss: Per example loss tensor. Yields: A context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py",
    "ast_data": "FunctionDef name:check_per_example_loss_rank arg:per_example_loss arguments arg Assign If Compare If Compare Raise Call With Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tensor_ndim",
    "source_code": "def _tensor_ndim(self):\n    return self.shape.ndims",
    "docstring": "Returns the rank of the Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_math_ops.py",
    "ast_data": "FunctionDef name:_tensor_ndim arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_meta_graph_def",
    "source_code": "def get_meta_graph_def(saved_model_dir, tag_set):\n    saved_model = read_saved_model(saved_model_dir)\n    set_of_tags = set([tag for tag in tag_set.split(',') if tag])\n    valid_tags = []\n    for meta_graph_def in saved_model.meta_graphs:\n        meta_graph_tags = set(meta_graph_def.meta_info_def.tags)\n        if meta_graph_tags == set_of_tags:\n            return meta_graph_def\n        else:\n            valid_tags.append(','.join(meta_graph_tags))\n    raise RuntimeError(f'MetaGraphDef associated with tag-set {tag_set} could not be found in the SavedModel. Please use one of the following tag-sets: {valid_tags}')",
    "docstring": "Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. The empty string tag is ignored so that passing '' means the empty tag set. For tag-set contains multiple tags, all tags must be passed in. Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_utils.py",
    "ast_data": "FunctionDef name:get_meta_graph_def arg:saved_model_dir arg:tag_set arguments arg arg Assign Call Assign Call Call Assign For Assign Call If Compare Return return:yes Call Call Raise Call"
  },
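A round-trip sketch, assuming `tf.saved_model.save` and the internal `saved_model_utils` module are available as imported below; the module, function, and output path are placeholders of ours. "serve" is the conventional serving tag.

```python
# Sketch: save a trivial SavedModel, then look up its MetaGraphDef by tag-set.
import tensorflow as tf
from tensorflow.python.tools import saved_model_utils

module = tf.Module()
module.f = tf.function(lambda x: x + 1.0)
module.f.get_concrete_function(tf.TensorSpec([None], tf.float32))  # trace once
tf.saved_model.save(module, "/tmp/demo_saved_model")

meta_graph = saved_model_utils.get_meta_graph_def("/tmp/demo_saved_model", "serve")
print(list(meta_graph.meta_info_def.tags))  # ['serve']
```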
  {
    "library": "matplotlib",
    "name": "draw",
    "source_code": "def draw(self, drawDC=None):\n    FigureCanvasAgg.draw(self)\n    self.bitmap = self._create_bitmap()\n    self._isDrawn = True\n    self.gui_repaint(drawDC=drawDC)",
    "docstring": "Render the figure using agg.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wxagg.py",
    "ast_data": "FunctionDef name:draw arg:self arg:drawDC arguments arg arg Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "def apply(self, func, *args, include_groups: bool=False, **kwargs) -> NDFrameT:\n    if include_groups:\n        raise ValueError('include_groups=True is no longer allowed.')\n    if isinstance(func, str):\n        if hasattr(self, func):\n            res = getattr(self, func)\n            if callable(res):\n                return res(*args, **kwargs)\n            elif args or kwargs:\n                raise ValueError(f'Cannot pass arguments to property {func}')\n            return res\n        else:\n            raise TypeError(f\"apply func should be callable, not '{func}'\")\n    elif args or kwargs:\n        if callable(func):\n\n            @wraps(func)\n            def f(g):\n                return func(g, *args, **kwargs)\n        else:\n            raise ValueError('func must be a callable if args or kwargs are supplied')\n    else:\n        f = func\n    return self._python_apply_general(f, self._obj_with_exclusions)",
    "docstring": "Apply function `gotchas.udf-mutationapplyapplyapplyapplyapplya transform applyapply` for one of the group. This group is filtered from the result: >>> g1.apply(lambda x: None if x.iloc[0, 0] == 3 else x) B C 0 1 4 1 2 6",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:apply arg:self arg:func arguments arg arg arg arg arg If Raise Call If Call If Call Assign Call If Call Return return:yes Call If BoolOp Raise Call Return return:yes Raise Call If BoolOp If Call FunctionDef name:f arg:g arguments arg Return return:yes Call Call Raise Call Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "note_hyperlink_target",
    "source_code": "def note_hyperlink_target(self, name: str, docname: str, node_id: str, title: str='') -> None:\n    if name in self.anonlabels and self.anonlabels[name] != (docname, node_id):\n        logger.warning(__('duplicate label %s, other instance in %s'), name, self.env.doc2path(self.anonlabels[name][0]))\n    self.anonlabels[name] = (docname, node_id)\n    if title:\n        self.labels[name] = (docname, node_id, title)",
    "docstring": "Add a hyperlink target for cross reference. .. warning:: This is only for internal use. Please don't use this from your extension. `` are recommended to add a hyperlink target to the document. This only adds a hyperlink target to the StandardDomain. And this does not add a node_id to node. Therefore, it is very fragile to calling this without understanding hyperlink target framework in both docutils and Sphinx. .. versionadded:: 3.0",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:note_hyperlink_target arg:self arg:name arg:docname arg:node_id arg:title arguments arg arg arg arg arg If BoolOp Compare Compare Call Call Call Assign If Assign"
  },
  {
    "library": "scipy",
    "name": "_copy",
    "source_code": "def _copy(self, system):\n    self.poles = system.poles\n    self.zeros = system.zeros\n    self.gain = system.gain",
    "docstring": "Copy the parameters of another system. Parameters ---------- system : instance of The zeros, poles gain system that is to be copied",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_copy arg:self arg:system arguments arg arg Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_on_size",
    "source_code": "def _on_size(self, event):\n    self._update_device_pixel_ratio()\n    _log.debug('%s - _on_size()', type(self))\n    sz = self.GetParent().GetSizer()\n    if sz:\n        si = sz.GetItem(self)\n    if sz and si and (not si.Proportion) and (not si.Flag & wx.EXPAND):\n        size = self.GetMinSize()\n    else:\n        size = self.GetClientSize()\n        size.IncTo(self.GetMinSize())\n    if getattr(self, '_width', None):\n        if size == (self._width, self._height):\n            return\n    self._width, self._height = size\n    self._isDrawn = False\n    if self._width <= 1 or self._height <= 1:\n        return\n    dpival = self.figure.dpi\n    if not wx.Platform == '__WXMSW__':\n        scale = self.GetDPIScaleFactor()\n        dpival /= scale\n    winch = self._width / dpival\n    hinch = self._height / dpival\n    self.figure.set_size_inches(winch, hinch, forward=False)\n    self.Refresh(eraseBackground=False)\n    ResizeEvent('resize_event', self)._process()\n    self.draw_idle()",
    "docstring": "Called when wxEventSize is generated. In this application we attempt to resize to fit the window, so it is better to take the performance hit and redraw the whole window.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_size arg:self arg:event arguments arg arg Call Call Call Assign Call Call If Assign Call If BoolOp Assign Call Assign Call Call Call If Call If Compare Return return:no Assign Assign If BoolOp Compare Compare Return return:no Assign If Compare Assign Call Assign Assign Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    X, y, groups = indexable(X, y, groups)\n    indices = np.arange(_num_samples(X))\n    for test_index in self._iter_test_masks(X, y, groups):\n        train_index = indices[np.logical_not(test_index)]\n        test_index = indices[test_index]\n        yield (train_index, test_index)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call Call For Call Assign Call Assign"
  },
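A sketch of the mask-based `split` above in action; `LeaveOneOut` is one splitter that inherits this exact train/test-index generation (the data is an arbitrary example).

```python
# Sketch: any mask-based CV splitter yields (train_indices, test_indices) pairs.
import numpy as np
from sklearn.model_selection import LeaveOneOut

X = np.arange(8).reshape(4, 2)
for train_idx, test_idx in LeaveOneOut().split(X):
    print(train_idx, test_idx)  # e.g. [1 2 3] [0]
```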
  {
    "library": "pytorch",
    "name": "key_of",
    "source_code": "@staticmethod\ndef key_of(node):\n    sizevars = V.graph.sizevars\n    return (node.get_device().type, str(node.get_dtype()), *sizevars.size_hints(node.get_size(), fallback=config.unbacked_symint_fallback), *sizevars.size_hints(node.get_stride(), fallback=config.unbacked_symint_fallback), sizevars.size_hint(node.get_layout().offset, fallback=config.unbacked_symint_fallback))",
    "docstring": "Extract the pieces of an ir.Buffer that we should invalidate cached autotuning results on.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:key_of arg:node arguments arg Assign Return return:yes Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._subplot_list)",
    "docstring": "Return the number of subplots in this figure.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\subplots.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, strategy, cluster_spec, task_type, task_id, session_config=None, rpc_layer='grpc', worker_barrier=None):\n    self._strategy = strategy\n    self._cluster_spec = cluster_spec\n    self._task_type = task_type\n    self._task_id = task_id\n    self._session_config = session_config\n    self._worker_barrier = worker_barrier\n    self._rpc_layer = rpc_layer\n    self._master_target = self._get_master_target()\n    self._num_workers = _get_num_workers(cluster_spec)\n    self._is_chief_node = self._is_chief()",
    "docstring": "Initialize the worker context object. Args: strategy: a object. cluster_spec: a ClusterSpec object. It can be empty or None in the local training case. task_type: a string indicating the role of the corresponding task, such as \"worker\" or \"ps\". It can be None if it is local training or in-graph replicated training. task_id: an integer indicating id of the corresponding task. It can be None if it is local training or in-graph replicated training. session_config: an optional object. rpc_layer: optional string specifying the RPC protocol for communication with worker masters. If None or empty, hosts in the will be used directly. worker_barrier: optional, the barrier object for worker synchronization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:strategy arg:cluster_spec arg:task_type arg:task_id arg:session_config arg:rpc_layer arg:worker_barrier arguments arg arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "reset",
    "source_code": "def reset(self):\n    self.get_template_cache.clear()",
    "docstring": "Empty the template cache.",
    "type": "method",
    "file_path": "django\\django\\template\\loaders\\cached.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> Ed448PublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
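A small sketch of what the abstract `__copy__` above enables: `copy.copy()` on key objects. It assumes a recent cryptography release (where key copying and `public_bytes_raw` are available) and an OpenSSL build with Ed448 support.

```python
# Sketch: copy.copy() on an Ed448 public key goes through __copy__.
import copy
from cryptography.hazmat.primitives.asymmetric.ed448 import Ed448PrivateKey

public_key = Ed448PrivateKey.generate().public_key()
clone = copy.copy(public_key)
print(clone.public_bytes_raw() == public_key.public_bytes_raw())  # True
```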
  {
    "library": "pytorch",
    "name": "new_subgroups_by_enumeration",
    "source_code": "def new_subgroups_by_enumeration(ranks_per_subgroup_list, timeout=None, backend=None, pg_options=None, group_desc=None):\n    if ranks_per_subgroup_list is None or len(ranks_per_subgroup_list) == 0:\n        raise ValueError(\"The arg 'ranks_per_subgroup_list' cannot be empty\")\n    subgroups = []\n    cur_subgroup = None\n    rank_to_ranks_dict = {}\n    for ranks in ranks_per_subgroup_list:\n        subgroup = new_group(ranks=ranks, timeout=timeout, backend=backend, pg_options=pg_options, group_desc=group_desc)\n        subgroups.append(subgroup)\n        my_rank = get_rank()\n        for rank in ranks:\n            if rank in rank_to_ranks_dict:\n                raise ValueError(f'Rank {rank} has appeared in both subgroup {rank_to_ranks_dict[rank]} and {ranks}')\n            rank_to_ranks_dict[rank] = ranks\n            if my_rank == rank:\n                cur_subgroup = subgroup\n                logger.info('Rank %s is assigned to subgroup %s', rank, ranks)\n    return (cur_subgroup, subgroups)",
    "docstring": "Create subgroups by dividing the global world. The division is specified by a nested list of ranks. The subgroups cannot have overlap, and some ranks may not have to be in any subgroup. This is a convenience API that calls `Safe concurrent usagenew_groupinit_process_groupBackend` can be specified so that process group can pick up high priority cuda streams. group_desc (str, optional): A string describing the group. Each subgroup will inherit its group_desc. Returns: The subgroup containing the current rank, and all the subgroups used for cleanup. Examples: >>> # Create two subgroups, where each has 2 processes. >>> # xdoctest: +SKIP(\"need process group init\") >>> cur_subgroup, subgroups = dist.new_subgroups(ranks=[[0, 2], [1, 3]]) >>> rank = dist.get_rank() >>> tensor = torch.ones(1, device=rank) * rank >>> dist.all_reduce(tensor, group=cur_subgroup) >>> tensor tensor([2]) # Subgroup 0: ranks 0 and 2 tensor([4]) # Subgroup 1: ranks 1 and 3",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:new_subgroups_by_enumeration arg:ranks_per_subgroup_list arg:timeout arg:backend arg:pg_options arg:group_desc arguments arg arg arg arg arg If BoolOp Compare Compare Call Raise Call Assign Assign Assign For Assign Call Call Assign Call For If Compare Raise Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_injective",
    "source_code": "@property\ndef _is_injective(self):\n    return True",
    "docstring": "Returns true iff the forward map is injective (one-to-one function). **WARNING** This hidden property and its behavior are subject to change. Note: Non-injective maps are supported, provided their domain can be partitioned into disjoint subsets, , such that, ignoring sets of measure zero, the restriction of to each subset is a differentiable bijection onto . Returns: is_injective: Python .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_is_injective arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "init",
    "source_code": "def init(self, est, begin_at_stage=0):\n    header_fields = ['Iter', 'Train Loss']\n    verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']\n    if est.subsample < 1:\n        header_fields.append('OOB Improve')\n        verbose_fmt.append('{oob_impr:>16.4f}')\n    header_fields.append('Remaining Time')\n    verbose_fmt.append('{remaining_time:>16s}')\n    print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields))\n    self.verbose_fmt = ' '.join(verbose_fmt)\n    self.verbose_mod = 1\n    self.start_time = time()\n    self.begin_at_stage = begin_at_stage",
    "docstring": "Initialize reporter Parameters ---------- est : Estimator The estimator begin_at_stage : int, default=0 stage at which to begin reporting",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:init arg:self arg:est arg:begin_at_stage arguments arg arg arg Assign Assign If Compare Call Call Call Call Call Call Call Assign Call Assign Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "@override\ndef state_dict(self) -> dict[str, Any]:\n    state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')}\n    state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas)\n    for idx, fn in enumerate(self.lr_lambdas):\n        if not isinstance(fn, types.FunctionType):\n            state_dict['lr_lambdas'][idx] = fn.__dict__.copy()\n    return state_dict",
    "docstring": "Return the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the optimizer. The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Compare Assign Call For Call If Call Assign Call Return return:yes"
  },
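A runnable sketch of the lambda-handling rule above: plain functions and lambdas are stored as `None` in the state dict, so they must be re-supplied when reconstructing the scheduler. The parameter and decay factor are arbitrary.

```python
# Sketch: LambdaLR.state_dict() does not serialize plain lambdas.
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=0.1)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda epoch: 0.95 ** epoch)
state = sched.state_dict()
print(state["lr_lambdas"])  # [None] -- the lambda itself is not saved
```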
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    X, y, groups = indexable(X, y, groups)\n    n_samples = _num_samples(X)\n    if self.n_splits > n_samples:\n        raise ValueError('Cannot have number of splits n_splits={0} greater than the number of samples: n_samples={1}.'.format(self.n_splits, n_samples))\n    for train, test in super().split(X, y, groups):\n        yield (train, test)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call If Compare Raise Call Call For Call Call"
  },
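A sketch of the `n_splits > n_samples` guard in the `split` above, using `KFold` (which inherits this method); the shapes are chosen to trigger the error.

```python
# Sketch: requesting more folds than samples raises ValueError.
import numpy as np
from sklearn.model_selection import KFold

try:
    list(KFold(n_splits=10).split(np.zeros((3, 1))))
except ValueError as exc:
    print(exc)  # Cannot have number of splits n_splits=10 greater than ...
```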
  {
    "library": "pandas",
    "name": "invalid_comparison",
    "source_code": "def invalid_comparison(left: ArrayLike, right: ArrayLike | list | Scalar, op: Callable[[Any, Any], bool]) -> npt.NDArray[np.bool_]:\n    if op is operator.eq:\n        res_values = np.zeros(left.shape, dtype=bool)\n    elif op is operator.ne:\n        res_values = np.ones(left.shape, dtype=bool)\n    else:\n        typ = type(right).__name__\n        raise TypeError(f'Invalid comparison between dtype={left.dtype} and {typ}')\n    return res_values",
    "docstring": "If a comparison has mismatched types and is not necessarily meaningful, follow python3 conventions by: - returning all-False for equality - returning all-True for inequality - raising TypeError otherwise Parameters ---------- left : array-like right : scalar, array-like op : operator.{eq, ne, lt, le, gt} Raises ------ TypeError : on inequality comparisons",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\invalid.py",
    "ast_data": "FunctionDef name:invalid_comparison arg:left arg:right arg:op arguments arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Raise Call Return return:yes"
  },
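A sketch of the python3-style fallback semantics described above (this is an internal pandas helper; the import path mirrors the entry's `file_path`, and the exact dtype in the error message is platform-dependent).

```python
# Sketch: eq -> all-False, ne -> all-True, ordering -> TypeError.
import operator
import numpy as np
from pandas.core.ops.invalid import invalid_comparison

arr = np.array([1, 2, 3])
print(invalid_comparison(arr, "a", operator.eq))  # [False False False]
print(invalid_comparison(arr, "a", operator.ne))  # [ True  True  True]
try:
    invalid_comparison(arr, "a", operator.lt)
except TypeError as exc:
    print(exc)  # Invalid comparison between dtype=int64 and str
```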
  {
    "library": "pytorch",
    "name": "QuantStub",
    "source_code": "class QuantStub(nn.Module):\n\n    def __init__(self, qconfig=None):\n        super().__init__()\n        if qconfig:\n            self.qconfig = qconfig\n\n    def forward(self, x):\n        return x",
    "docstring": "Quantize stub module, before calibration, this is same as an observer, it will be swapped as in . Args: qconfig: quantization configuration for the tensor, if qconfig is not provided, we will get qconfig from parent modules",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\stubs.py",
    "ast_data": "ClassDef name:QuantStub FunctionDef name:__init__ arg:self arg:qconfig arguments arg arg Call Call If Assign FunctionDef name:forward arg:self arg:x arguments arg arg Return return:yes"
  },
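A sketch of the stub's pre-conversion behavior: `QuantStub.forward` is a passthrough, so the wrapped model still runs in float before `convert()` swaps the stubs. The wrapper class and shapes are ours.

```python
# Sketch: QuantStub/DeQuantStub are identities before conversion.
import torch
from torch.ao.quantization import DeQuantStub, QuantStub

class Wrapped(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()      # swapped for a real quantize op by convert()
        self.fc = torch.nn.Linear(4, 2)
        self.dequant = DeQuantStub()  # swapped for a real dequantize op by convert()

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))

print(Wrapped()(torch.randn(1, 4)).shape)  # torch.Size([1, 2]), still float
```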
  {
    "library": "pandas",
    "name": "_shallow_copy",
    "source_code": "def _shallow_copy(self, values, name: Hashable=no_default) -> Self:\n    name = self._name if name is no_default else name\n    return self._simple_new(values, name=name, refs=self._references)",
    "docstring": "Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_shallow_copy arg:self arg:values arg:name arguments arg arg arg Assign Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, *, use_overline=False, one_half='\\\\frac{1}{2}', minor=False, minor_threshold=25, minor_number=6):\n    self._use_overline = use_overline\n    self._one_half = one_half\n    self._minor = minor\n    self._labelled = set()\n    self._minor_threshold = minor_threshold\n    self._minor_number = minor_number",
    "docstring": "Parameters ---------- use_overline : bool, default: False If x > 1/2, with x = 1 - v, indicate if x should be displayed as $\\overline{v}$. The default is to display $1 - v$. one_half : str, default: r\"\\\\frac{1}{2}\" The string used to represent 1/2. minor : bool, default: False Indicate if the formatter is formatting minor ticks or not. Basically minor ticks are not labelled, except when only few ticks are provided, ticks with most space with neighbor ticks are labelled. See other parameters to change the default behavior. minor_threshold : int, default: 25 Maximum number of locs for labelling some minor ticks. This parameter have no effect if minor is False. minor_number : int, default: 6 Number of ticks which are labelled when the number of ticks is below the threshold.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg Assign Assign Assign Assign Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_stateful_to_state_dict",
    "source_code": "@_dcp_method_logger(log_exceptions=True)\ndef _stateful_to_state_dict(state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:\n    stateful_state_dict = {}\n    for key, elem in state_dict.items():\n        stateful_state_dict[key] = elem.state_dict() if isinstance(elem, Stateful) else elem\n    return stateful_state_dict",
    "docstring": "Creates a shallow copy of where is called for each Stateful object.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_saver.py",
    "ast_data": "FunctionDef name:_stateful_to_state_dict arg:state_dict arguments arg Assign For Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_zeros",
    "source_code": "def _zeros(shape, dtype):\n    if dtype == dtypes.string or dtype == dtypes.resource:\n        return None\n    ctx = context.context()\n    if not ctx.executing_eagerly():\n        return array_ops.zeros(shape, dtype)\n    device = ctx.device_name\n    if tensor_util.is_tf_type(shape):\n        shape_key = shape.ref()\n    else:\n        shape_key = shape\n    cache_key = (shape_key, dtype, device)\n    cached = ctx.zeros_cache().get(cache_key)\n    if cached is None:\n        if dtypes.as_dtype(dtype).is_bool:\n            value = False\n        else:\n            value = 0\n        cached = _fast_fill(value, shape, dtype)\n        ctx.zeros_cache().put(cache_key, cached)\n    return cached",
    "docstring": "Helper to return (possibly cached) zero tensors in eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_zeros arg:shape arg:dtype arguments arg arg If BoolOp Compare Compare Return return:no Assign Call If Call Return return:yes Call Assign If Call Assign Call Assign Assign Assign Call Call If Compare If Call Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "not_eq",
    "source_code": "def not_eq(a, b):\n    return not_(eq(a, b))",
    "docstring": "Functional form of \"not-equal\".",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\logical.py",
    "ast_data": "FunctionDef name:not_eq arg:a arg:b arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "hermitian_transpose",
    "source_code": "def hermitian_transpose(self):\n    return self._from_matrix(sm_ops.sparse_matrix_transpose(self._matrix, conjugate=True, type=self.dtype), self.eager_handle_data)",
    "docstring": "Return the hermitian transpose of the matrix.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_ops.py",
    "ast_data": "FunctionDef name:hermitian_transpose arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_toolkit_path",
    "source_code": "def _get_toolkit_path():\n    sycl_toolkit_path = None\n    sycl_toolkit_path = _get_default_sycl_toolkit_path()\n    if 'SYCL_TOOLKIT_PATH' in os.environ:\n        sycl_toolkit_path = os.environ['SYCL_TOOLKIT_PATH']\n    return os.path.realpath(sycl_toolkit_path)",
    "docstring": "Determines and returns the SYCL installation path.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_sycl_config.py",
    "ast_data": "FunctionDef name:_get_toolkit_path arguments Assign Assign Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "benchmark_cuda_function_in_microseconds",
    "source_code": "def benchmark_cuda_function_in_microseconds(func: Callable, *args, **kwargs) -> float:\n\n    def no_args():\n        func(*args, **kwargs)\n    time = do_bench_using_profiling(no_args)\n    return time * 1000.0",
    "docstring": "Thin wrapper around do_bench_using_profiling",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\transformer\\sdpa.py",
    "ast_data": "FunctionDef name:benchmark_cuda_function_in_microseconds arg:func arguments arg arg arg FunctionDef name:no_args arguments Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_remove_non_arrays",
    "source_code": "def _remove_non_arrays(*arrays, remove_none=True, remove_types=(str,)):\n    filtered_arrays = []\n    remove_types = tuple(remove_types)\n    for array in arrays:\n        if remove_none and array is None:\n            continue\n        if isinstance(array, remove_types):\n            continue\n        if sp.issparse(array):\n            continue\n        filtered_arrays.append(array)\n    return filtered_arrays",
    "docstring": "Filter arrays to exclude None and/or specific types. Sparse arrays are always filtered out. Parameters ---------- *arrays : array objects Array objects. remove_none : bool, default=True Whether to ignore None objects passed in arrays. remove_types : tuple or list, default=(str,) Types to ignore in the arrays. Returns ------- filtered_arrays : list List of arrays filtered as requested. An empty list is returned if no input passes the filters.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:_remove_non_arrays arguments arg arg arg Assign Assign Call For If BoolOp Compare If Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "trunc_normal_",
    "source_code": "def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=-2.0, b: float=2.0, generator: _Optional[torch.Generator]=None) -> Tensor:\n    return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator)",
    "docstring": "Fill the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math: with values outside :math: redrawn until they are within the bounds. The method used for generating the random values works best when :math:. Args: tensor: an n-dimensional mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:trunc_normal_ arg:tensor arg:mean arg:std arg:a arg:b arg:generator arguments arg arg arg arg arg arg Return return:yes Call"
  },
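A runnable sketch of this public entry point, checking the truncation bounds described in the docstring:

import torch
import torch.nn as nn

w = torch.empty(3, 5)
# In-place fill from N(0, 1) truncated to [a, b] = [-2, 2] (the defaults).
nn.init.trunc_normal_(w)
assert w.min() >= -2.0 and w.max() <= 2.0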
  {
    "library": "tensorflow",
    "name": "_detect_save_format",
    "source_code": "def _detect_save_format(filepath):\n    filepath = path_to_string(filepath)\n    if saving_utils.is_hdf5_filepath(filepath):\n        return (filepath, 'h5')\n    if _is_readable_tf_checkpoint(filepath):\n        save_format = 'tf'\n    elif sm_loader.contains_saved_model(filepath):\n        ckpt_path = os.path.join(filepath, sm_constants.VARIABLES_DIRECTORY, sm_constants.VARIABLES_FILENAME)\n        if _is_readable_tf_checkpoint(ckpt_path):\n            filepath = ckpt_path\n            save_format = 'tf'\n        else:\n            raise ValueError(\"Unable to load weights. filepath {} appears to be a SavedModel directory, but checkpoint either doesn't exist, or is incorrectly formatted.\".format(filepath))\n    else:\n        save_format = 'h5'\n    return (filepath, save_format)",
    "docstring": "Returns path to weights file and save format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:_detect_save_format arg:filepath arguments arg Assign Call If Call Return return:yes If Call Assign If Call Assign Call If Call Assign Assign Raise Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_text_width_height_descent",
    "source_code": "@classmethod\ndef get_text_width_height_descent(cls, tex, fontsize, renderer=None):\n    if tex.strip() == '':\n        return (0, 0, 0)\n    dvifile = cls.make_dvi(tex, fontsize)\n    dpi_fraction = renderer.points_to_pixels(1.0) if renderer else 1\n    with dviread.Dvi(dvifile, 72 * dpi_fraction) as dvi:\n        page, = dvi\n    return (page.width, page.height + page.descent, page.descent)",
    "docstring": "Return width, height and descent of the text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:get_text_width_height_descent arg:cls arg:tex arg:fontsize arg:renderer arguments arg arg arg arg If Compare Call Return return:yes Assign Call Assign Call With Call Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "set",
    "source_code": "def set(self, value: Any, priority: int) -> None:\n    if priority >= self.priority:\n        if isinstance(self.value, BaseSettings):\n            value = BaseSettings(value, priority=priority)\n        self.value = value\n        self.priority = priority",
    "docstring": "Sets value if priority is higher or equal than current priority.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:set arg:self arg:value arg:priority arguments arg arg arg If Compare If Call Assign Call Assign Assign"
  },
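The same priority rule is observable through Scrapy's public BaseSettings API; a small sketch:

from scrapy.settings import BaseSettings

s = BaseSettings()
s.set("CONCURRENT_REQUESTS", 32, priority=20)
# Ignored: 10 < 20, so the stored value and priority are unchanged.
s.set("CONCURRENT_REQUESTS", 8, priority=10)
assert s.getint("CONCURRENT_REQUESTS") == 32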
  {
    "library": "django",
    "name": "from_string",
    "source_code": "def from_string(self, template_code):\n    raise NotImplementedError('subclasses of BaseEngine should provide a from_string() method')",
    "docstring": "Create and return a template for the given source code. This method is optional.",
    "type": "method",
    "file_path": "django\\django\\template\\backends\\base.py",
    "ast_data": "FunctionDef name:from_string arg:self arg:template_code arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "LazyVariableTracker",
    "source_code": "class LazyVariableTracker(object):\n\n    def __init__(self):\n        self._uninitialized_var_list = []\n\n    def initialize_all(self):\n\n        def assign_function(uninitialized_var_list):\n            for var in uninitialized_var_list:\n                val = var._initial_value\n                packed_var = getattr(var, '_packed_var', None)\n                handle = getattr(packed_var, 'packed_handle', var.handle)\n                with ops.device(handle.device):\n                    resource_variable_ops.AssignVariableOp(resource=handle, value=val)\n            return constant_op.constant([])\n        assign_tf_function = def_function.function(assign_function, autograph=False, jit_compile=False)\n        with ops.init_scope():\n            if len(self._uninitialized_var_list) > 1:\n                assign_tf_function(self._uninitialized_var_list)\n            else:\n                assign_function(self._uninitialized_var_list)\n        self._uninitialized_var_list = []\n\n    def add_uninitialized_var(self, var):\n        self._uninitialized_var_list.append(var)",
    "docstring": "Class to track uninitialized lazy variables.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_util.py",
    "ast_data": "ClassDef name:LazyVariableTracker FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:initialize_all arg:self arguments arg FunctionDef name:assign_function arg:uninitialized_var_list arguments arg For Assign Assign Call Assign Call With Call Call Return return:yes Call Assign Call With Call If Compare Call Call Call Assign FunctionDef name:add_uninitialized_var arg:self arg:var arguments arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "new_frame_seq",
    "source_code": "def new_frame_seq(self):\n    return iter(self._framedata)",
    "docstring": "Return a new sequence of frame information.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:new_frame_seq arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_compute_shear_matrix",
    "source_code": "def _compute_shear_matrix(shear: Tensor) -> Tensor:\n    matrix: Tensor = eye_like(3, shear, shared_memory=False)\n    shx, shy = torch.chunk(shear, chunks=2, dim=-1)\n    matrix[..., 0, 1:2] += shx\n    matrix[..., 1, 0:1] += shy\n    return matrix",
    "docstring": "Compute affine matrix for shearing.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:_compute_shear_matrix arg:shear arguments arg Call Assign Call Return return:yes"
  },
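An equivalent construction in plain torch, as a hedged sketch of what the helper computes (eye_like is a kornia-internal helper, replaced here with torch.eye):

import torch

shear = torch.tensor([[0.1, 0.2]])  # (B, 2): shx, shy
m = torch.eye(3).expand(shear.shape[0], 3, 3).clone()
m[..., 0, 1] += shear[..., 0]  # x displaced proportionally to y
m[..., 1, 0] += shear[..., 1]  # y displaced proportionally to x
print(m)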
  {
    "library": "tensorflow",
    "name": "to_list",
    "source_code": "def to_list(self):\n    if not isinstance(self.row_splits, ops.EagerTensor):\n        raise ValueError('to_list can only be used in eager mode.')\n    row_splits = self.row_splits.numpy().tolist()\n    values = self.values\n    if isinstance(values, RaggedTensor):\n        return [values[row_splits[i]:row_splits[i + 1]].to_list() for i in range(len(row_splits) - 1)]\n    else:\n        if hasattr(values, 'numpy'):\n            values_as_list = values.numpy().tolist()\n        elif hasattr(values, 'to_list'):\n            values_as_list = values.to_list()\n        else:\n            raise ValueError('values must be convertible to a list')\n        return [values_as_list[row_splits[i]:row_splits[i + 1]] for i in range(len(row_splits) - 1)]",
    "docstring": "Returns a nested Python with the values for this . Requires that was constructed in eager execution mode. Returns: A nested Python .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:to_list arg:self arguments arg If Call Raise Call Assign Call Call Assign If Call Return return:yes Call Call Call If Call Assign Call Call If Call Assign Call Raise Call Return return:yes Call Call"
  },
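A short usage example via the public ragged API:

import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3], []])
# Works only in eager mode, per the ValueError raised above.
assert rt.to_list() == [[1, 2], [3], []]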
  {
    "library": "numpy",
    "name": "_get_edges",
    "source_code": "def _get_edges(padded, axis, width_pair):\n    left_index = width_pair[0]\n    left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)\n    left_edge = padded[left_slice]\n    right_index = padded.shape[axis] - width_pair[1]\n    right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)\n    right_edge = padded[right_slice]\n    return (left_edge, right_edge)",
    "docstring": "Retrieve edge values from empty-padded array in given dimension. Parameters ---------- padded : ndarray Empty-padded array. axis : int Dimension in which the edges are considered. width_pair : (int, int) Pair of widths that mark the pad area on both sides in the given dimension. Returns ------- left_edge, right_edge : ndarray Edge values of the valid area in in the given dimension. Its shape will always match except for the dimension given by which will have a length of 1.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_arraypad_impl.py",
    "ast_data": "FunctionDef name:_get_edges arg:padded arg:axis arg:width_pair arguments arg arg arg Assign Assign Call Call Assign Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "repeat",
    "source_code": "def repeat(n: int, body: Callable[..., Union[core_types.TensorLike, Iterable]], inputs: Optional[List[core_types.TensorLike]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, name: Any=None) -> List[core_types.TensorLike]:\n\n    def _convert_to_list(xs):\n        if not isinstance(xs, (list, tuple)):\n            return [xs]\n        else:\n            return list(xs)\n\n    def cond(i, *args):\n        del args\n        return i < n\n\n    def body_wrapper(i, *args):\n        return [i + 1] + _convert_to_list(body(*args))\n    inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)\n    outputs = while_loop(cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)\n    outputs = _convert_to_list(outputs)\n    if len(outputs) == 1:\n        return outputs[0].op\n    else:\n        return outputs[1:]",
    "docstring": "Builds a training loop that executes a fixed number of iterations. The set of loop-carried tensors correspond to . must be a function that takes and returns the values of the loop-carried tensors. Args: n: the number of loop iterations body: a Python function that builds the loop body. inputs: a list of initial values passed into the training loop or None (equivalent to an empty list). infeed_queue: if not None, the infeed queue from which to append a tuple of arguments as inputs to condition. name: (Deprecated) Does nothing. Returns: The final values of the loop-carried tensors. Raises: ValueError: if there is a type error.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\training_loop.py",
    "ast_data": "FunctionDef name:repeat arg:n arg:body arg:inputs arg:infeed_queue arg:name arguments arg arg arg arg arg FunctionDef name:_convert_to_list arg:xs arguments arg If Call Return return:yes Return return:yes Call FunctionDef name:cond arg:i arguments arg arg Return return:yes Compare FunctionDef name:body_wrapper arg:i arguments arg arg Return return:yes Call Call Assign Compare Call Assign Call Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, device, token, args):\n    use_tape_cache = self._support_graph_mode_gradient or record.could_possibly_record()\n    if use_tape_cache:\n        with backprop.GradientTape() as tape:\n            for tensor in args:\n                for t in nest.flatten(tensor):\n                    if backprop_util.IsTrainable(t):\n                        tape.watch(t)\n            outputs = self._call(device, args)\n        tape_cache[compat.as_bytes(token)] = (tape, args, outputs)\n    else:\n        outputs = self._call(device, args)\n    return outputs",
    "docstring": "Calls in eager mode, recording the tape if needed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:device arg:token arg:args arguments arg arg arg arg Assign BoolOp Call If With Call For For Call If Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_class_styles",
    "source_code": "@property\ndef _class_styles(self):\n    return [{'selector': f'.{self.class_name}', 'props': maybe_convert_css_to_tuples(self.class_properties)}]",
    "docstring": "Combine the `table_styles` to allow tooltips to render in HTML. Returns ------- styles : List",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_class_styles arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    return self._predict(X)",
    "docstring": "Predict using the multi-layer perceptron classifier. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- y : ndarray, shape (n_samples,) or (n_samples, n_classes) The predicted classes.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Return return:yes Call"
  },
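A minimal end-to-end usage sketch of the public estimator (dataset and hyperparameters here are illustrative only):

from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=100, random_state=0)
clf = MLPClassifier(max_iter=500, random_state=0).fit(X, y)
# check_is_fitted passes once fit() has run; predict returns class labels.
print(clf.predict(X[:5]))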
  {
    "library": "kornia",
    "name": "make_samplers",
    "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n    gain = _range_bound(self.gain, 'gain', device=device, dtype=dtype)\n    self.gain_sampler = UniformDistribution(gain[0], gain[1], validate_args=False)\n    center = _range_bound(self.center, 'center', device=device, dtype=dtype)\n    self.center_sampler = UniformDistribution(center[0], center[1], validate_args=False)\n    sigma = _range_bound(self.sigma, 'sigma', device=device, dtype=dtype)\n    self.sigma_sampler = UniformDistribution(sigma[0], sigma[1], validate_args=False)\n    sign = _range_bound(self.sign, 'sign', bounds=(-1.0, 1.0), center=0.0, device=device, dtype=dtype)\n    self.sign_sampler = UniformDistribution(sign[0], sign[1], validate_args=False)",
    "docstring": "Create samplers for generating random gaussian illumination parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\gaussian_illumination.py",
    "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_resolve_hasher",
    "source_code": "def _resolve_hasher(algorithm, file_hash=None):\n    if algorithm == 'sha256':\n        return hashlib.sha256()\n    if algorithm == 'auto' and file_hash is not None and (len(file_hash) == 64):\n        return hashlib.sha256()\n    return hashlib.md5()",
    "docstring": "Returns hash algorithm as hashlib function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_resolve_hasher arg:algorithm arg:file_hash arguments arg arg If Compare Return return:yes Call If BoolOp Compare Compare Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, coef_init=None, intercept_init=None):\n    self._more_validate_params()\n    lr = 'pa1' if self.loss == 'epsilon_insensitive' else 'pa2'\n    return self._fit(X, y, alpha=1.0, C=self.C, loss='epsilon_insensitive', learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init)",
    "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : numpy array of shape [n_samples] Target values. coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:coef_init arg:intercept_init arguments arg arg arg arg arg Call Assign Compare Return return:yes Call Call"
  },
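A usage sketch of the public estimator; per the code above, loss='epsilon_insensitive' selects the 'pa1' learning rate internally (the synthetic data is illustrative only):

import numpy as np
from sklearn.linear_model import PassiveAggressiveRegressor

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5])
reg = PassiveAggressiveRegressor(loss="epsilon_insensitive", max_iter=100, random_state=0)
print(reg.fit(X, y).coef_)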
  {
    "library": "pytorch",
    "name": "_rebuild_tensor_from_dtensor_meta",
    "source_code": "def _rebuild_tensor_from_dtensor_meta(arg) -> object:\n    assert arg.tensor_meta is not None, 'DTensorSpec does not contain tensor_meta.'\n    return torch.empty_strided(arg.tensor_meta.shape, arg.tensor_meta.stride, dtype=arg.tensor_meta.dtype)",
    "docstring": "This is used to propagate tensor metadata, must be under fake mode",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "FunctionDef name:_rebuild_tensor_from_dtensor_meta arg:arg arguments arg Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@override\ndef step(self, epoch=None) -> None:\n    if epoch is None and self.last_epoch < 0:\n        epoch = 0\n    if epoch is None:\n        epoch = self.last_epoch + 1\n        self.T_cur = self.T_cur + 1\n        if self.T_cur >= self.T_i:\n            self.T_cur = self.T_cur % self.T_i\n            self.T_i = self.T_i * self.T_mult\n    else:\n        if epoch < 0:\n            raise ValueError(f'Expected non-negative epoch, but got {epoch}')\n        if epoch >= self.T_0:\n            if self.T_mult == 1:\n                self.T_cur = epoch % self.T_0\n            else:\n                n = int(math.log(epoch / self.T_0 * (self.T_mult - 1) + 1, self.T_mult))\n                self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)\n                self.T_i = self.T_0 * self.T_mult ** n\n        else:\n            self.T_i = self.T_0\n            self.T_cur = epoch\n    self.last_epoch = math.floor(epoch)\n    with _enable_get_lr_call(self):\n        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n            param_group['lr'] = lr\n    self._last_lr = [group['lr'] for group in self.optimizer.param_groups]",
    "docstring": "Step could be called after every batch update. Example: >>> # xdoctest: +SKIP(\"Undefined vars\") >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> iters = len(dataloader) >>> for epoch in range(20): >>> for i, sample in enumerate(dataloader): >>> inputs, labels = sample['inputs'], sample['labels'] >>> optimizer.zero_grad() >>> outputs = net(inputs) >>> loss = criterion(outputs, labels) >>> loss.backward() >>> optimizer.step() >>> scheduler.step(epoch + i / iters) This function can be called in an interleaved way. Example: >>> # xdoctest: +SKIP(\"Undefined vars\") >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> for epoch in range(20): >>> scheduler.step() >>> scheduler.step(26) >>> scheduler.step() # scheduler.step(27), instead of scheduler(20)",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:step arg:self arg:epoch arguments arg arg If BoolOp Compare Compare Assign If Compare Assign Assign If Compare Assign Assign If Compare Raise Call If Compare If Compare Assign Assign Call Call Assign Assign Assign Assign Assign Call With Call For Call Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "add_choices",
    "source_code": "@staticmethod\ndef add_choices(choices, layout, input_nodes):\n    template = CKTileGemmTemplate(input_nodes, layout)\n    ops = template.gen_ops()\n    for op in ops:\n        template.maybe_append_choice(choices, op=op)",
    "docstring": "Add Composable Kernel Universal GEMM instance choices to the auto-tuning list.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\ck_tile_universal_gemm_template.py",
    "ast_data": "FunctionDef name:add_choices arg:choices arg:layout arg:input_nodes arguments arg arg arg Assign Call Assign Call For Call"
  },
  {
    "library": "pytorch",
    "name": "ir_name_to_func_name",
    "source_code": "def ir_name_to_func_name(name: str) -> str:\n    name_list = name.split('::')\n    return 'convert_' + '_'.join(name_list)",
    "docstring": "prim::If -> convert_prim_If",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\converter.py",
    "ast_data": "FunctionDef name:ir_name_to_func_name arg:name arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_action",
    "source_code": "def get_action(self, name):\n    return self._global_actions[name]",
    "docstring": "Explicitly get a registered global action whether it's enabled or not. Raise KeyError for invalid names.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:get_action arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_log_weights",
    "source_code": "@abstractmethod\ndef _estimate_log_weights(self):\n    pass",
    "docstring": "Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm. Returns ------- log_weight : array, shape (n_components, )",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_estimate_log_weights arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "_logpdf",
    "source_code": "def _logpdf(self, x, loc, prec_U, log_pdet, df, dim, rank):\n    if df == np.inf:\n        return multivariate_normal._logpdf(x, loc, prec_U, log_pdet, rank)\n    dev = x - loc\n    maha = np.square(np.dot(dev, prec_U)).sum(axis=-1)\n    t = 0.5 * (df + dim)\n    A = gammaln(t)\n    B = gammaln(0.5 * df)\n    C = dim / 2.0 * np.log(df * np.pi)\n    D = 0.5 * log_pdet\n    E = -t * np.log(1 + 1.0 / df * maha)\n    return _squeeze_output(A - B - C - D + E)",
    "docstring": "Utility method , for parameters. Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. loc : ndarray Location of the distribution. prec_U : ndarray A decomposition such that is the inverse of the shape matrix. log_pdet : float Logarithm of the determinant of the shape matrix. df : float Degrees of freedom of the distribution. dim : int Dimension of the quantiles x. rank : int Rank of the shape matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:loc arg:prec_U arg:log_pdet arg:df arg:dim arg:rank arguments arg arg arg arg arg arg arg arg If Compare Return return:yes Call Assign Assign Call Call Call Assign Assign Call Assign Call Assign Call Assign Assign Call Return return:yes Call"
  },
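The public wrapper that performs the argument checking mentioned in the notes; as in the code above, df=np.inf falls back to the multivariate normal (the point and parameters below are illustrative):

import numpy as np
from scipy.stats import multivariate_t

x = np.array([0.5, -0.2])
# logpdf validates arguments, then defers to the private _logpdf.
print(multivariate_t.logpdf(x, loc=[0.0, 0.0], shape=np.eye(2), df=3))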
  {
    "library": "pytorch",
    "name": "_restore_function",
    "source_code": "def _restore_function(fn: Callable, fn_module: types.ModuleType) -> None:\n    global _original_functions\n    global _wrapper_functions\n    if fn not in _replaced_functions:\n        return\n    original_name, original_fn = _replaced_functions[fn]\n    setattr(fn_module, original_name, original_fn)",
    "docstring": "Restore the function that is replaced by _distribute_function.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:_restore_function arg:fn arg:fn_module arguments arg arg If Compare Return return:no Assign Call"
  },
  {
    "library": "django",
    "name": "autoinc_sql",
    "source_code": "def autoinc_sql(self, table, column):\n    return None",
    "docstring": "Return any SQL needed to support auto-incrementing primary keys, or None if no SQL is necessary. This SQL is executed when a table is created.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:autoinc_sql arg:self arg:table arg:column arguments arg arg arg Return return:no"
  },
  {
    "library": "scipy",
    "name": "minimiser",
    "source_code": "def minimiser(self):\n    if self.check_min:\n        self._min = all((self.f < v.f for v in self.nn))\n        self.check_min = False\n    return self._min",
    "docstring": "Check whether this vertex is strictly less than all its neighbours",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:minimiser arg:self arguments arg If Assign Call Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "maybe_cast_str_impl",
    "source_code": "@overload(maybe_cast_str)\ndef maybe_cast_str_impl(x):\n    if isinstance(x, types.UnicodeCharSeq):\n        return lambda x: str(x)\n    else:\n        return lambda x: x",
    "docstring": "Converts numba UnicodeCharSeq (numpy string scalar) -> unicode type (string). Is a no-op for other types.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "FunctionDef name:maybe_cast_str_impl arg:x arguments arg If Call Return return:yes arguments arg Call Return return:yes arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "ExpandingGroupby",
    "source_code": "class ExpandingGroupby(BaseWindowGroupby, Expanding):\n    _attributes = Expanding._attributes + BaseWindowGroupby._attributes\n\n    def _get_window_indexer(self) -> GroupbyIndexer:\n        window_indexer = GroupbyIndexer(groupby_indices=self._grouper.indices, window_indexer=ExpandingIndexer)\n        return window_indexer",
    "docstring": "Provide a expanding groupby implementation.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\window\\expanding.py",
    "ast_data": "ClassDef name:ExpandingGroupby Assign FunctionDef name:_get_window_indexer arg:self arguments arg Assign Call Return return:yes"
  },
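A usage sketch via the public pandas API, which constructs an ExpandingGroupby under the hood:

import pandas as pd

df = pd.DataFrame({"g": ["a", "a", "b", "b"], "v": [1, 2, 3, 4]})
# Expanding sum computed independently within each group.
print(df.groupby("g")["v"].expanding().sum())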
  {
    "library": "pytorch",
    "name": "_get_reduction_index_coeffs",
    "source_code": "@cache_on_self\ndef _get_reduction_index_coeffs(self) -> list[sympy.Expr]:\n    rn_prefixes = self.get_reduction_prefixes()\n    rn_numels = self._get_reduction_symbols('numel', integer=True, positive=True)\n    return [sympy_product(rn_numels[idx + 1:]) for idx in range(len(rn_prefixes) - 1)] + [sympy.Integer(1)]",
    "docstring": "Compute coefficients to convert ND reduction indices to linear indices. For example: rindex = r0_index * r1_numel * ... * rn_numel + ... + rn_index.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:_get_reduction_index_coeffs arg:self arguments arg Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_cast_forward_inputs",
    "source_code": "def _cast_forward_inputs(dtype: Optional[torch.dtype], *args: Any, **kwargs: Any) -> tuple[Any, Any]:\n    if dtype is None:\n        return (args, kwargs)\n\n    def cast_fn(x: torch.Tensor) -> torch.Tensor:\n        if not torch.is_floating_point(x) or x.dtype == dtype:\n            return x\n        return x.to(dtype)\n    return (_apply_to_tensors(cast_fn, args), _apply_to_tensors(cast_fn, kwargs))",
    "docstring": "Cast floating point tensors in `` on the tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_cast_forward_inputs arg:dtype arguments arg arg arg If Compare Return return:yes FunctionDef name:cast_fn arg:x arguments arg If BoolOp Call Compare Return return:yes Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_index_as_unique",
    "source_code": "@property\ndef _index_as_unique(self) -> bool:\n    return self.is_unique",
    "docstring": "Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_index_as_unique arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "InventoryAdapter",
    "source_code": "class InventoryAdapter:\n\n    def __init__(self, env: BuildEnvironment) -> None:\n        self.env = env\n        if not hasattr(env, 'intersphinx_cache'):\n            self.env.intersphinx_cache = {}\n            self.env.intersphinx_inventory = {}\n            self.env.intersphinx_named_inventory = {}\n\n    @property\n    def cache(self) -> dict[InventoryURI, InventoryCacheEntry]:\n        return self.env.intersphinx_cache\n\n    @property\n    def main_inventory(self) -> Inventory:\n        return self.env.intersphinx_inventory\n\n    @property\n    def named_inventory(self) -> dict[InventoryName, Inventory]:\n        return self.env.intersphinx_named_inventory\n\n    def clear(self) -> None:\n        self.env.intersphinx_inventory.clear()\n        self.env.intersphinx_named_inventory.clear()",
    "docstring": "Inventory adapter for environment",
    "type": "class",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_shared.py",
    "ast_data": "ClassDef name:InventoryAdapter FunctionDef name:__init__ arg:self arg:env arguments arg arg Assign If Call Assign Assign Assign FunctionDef name:cache arg:self arguments arg Return return:yes FunctionDef name:main_inventory arg:self arguments arg Return return:yes FunctionDef name:named_inventory arg:self arguments arg Return return:yes FunctionDef name:clear arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "AutoHeightChar",
    "source_code": "class AutoHeightChar(Hlist):\n\n    def __init__(self, c: str, height: float, depth: float, state: ParserState, always: bool=False, factor: float | None=None):\n        alternatives = state.fontset.get_sized_alternatives_for_symbol(state.font, c)\n        xHeight = state.fontset.get_xheight(state.font, state.fontsize, state.dpi)\n        state = state.copy()\n        target_total = height + depth\n        for fontname, sym in alternatives:\n            state.font = fontname\n            char = Char(sym, state)\n            if char.height + char.depth >= target_total - 0.2 * xHeight:\n                break\n        shift = 0.0\n        if state.font != 0 or len(alternatives) == 1:\n            if factor is None:\n                factor = target_total / (char.height + char.depth)\n            state.fontsize *= factor\n            char = Char(sym, state)\n            shift = depth - char.depth\n        super().__init__([char])\n        self.shift_amount = shift",
    "docstring": "A character as close to the given height and depth as possible. When using a font with multiple height versions of some characters (such as the BaKoMa fonts), the correct glyph will be selected, otherwise this will always just return a scaled version of the glyph.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:AutoHeightChar FunctionDef name:__init__ arg:self arg:c arg:height arg:depth arg:state arg:always arg:factor arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign For Assign Assign Call If Compare Assign If BoolOp Compare Compare Call If Compare Assign Assign Call Assign Call Call Assign"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    raise AbstractMethodError(self)",
    "docstring": "Length of this array Returns ------- length : int",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "__gt__",
    "source_code": "def __gt__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return None\n    else:\n        return self._value > other.value",
    "docstring": "Returns True if is known to be greater than . Dimensions are compared as follows: Args: other: Another Dimension. Returns: The value of if both are known, otherwise None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__gt__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:no Return return:yes Compare"
  },
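A short sketch of the three-valued comparison via the public TF1 compatibility API:

import tensorflow as tf

d = tf.compat.v1.Dimension
print(d(3) > d(2))     # True
print(d(3) > d(None))  # None (unknown, not False)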
  {
    "library": "scikit-learn",
    "name": "sparse_coef_",
    "source_code": "@property\ndef sparse_coef_(self):\n    return sparse.csr_matrix(self.coef_)",
    "docstring": "Sparse representation of the fitted .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:sparse_coef_ arg:self arguments arg Return return:yes Call"
  },
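A usage sketch with a fitted L1 model (the data and alpha are illustrative only):

import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.RandomState(0)
X = rng.randn(30, 10)
y = X[:, 0] - X[:, 3]
est = Lasso(alpha=0.1).fit(X, y)
# CSR view of coef_: zeros induced by the L1 penalty are not stored.
print(est.sparse_coef_)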
  {
    "library": "tensorflow",
    "name": "convert",
    "source_code": "@_export_metrics\ndef convert(self):\n    if self.experimental_lower_to_saved_model:\n        saved_model_convert_result = self._convert_as_saved_model()\n        if saved_model_convert_result:\n            return saved_model_convert_result\n    graph_def, input_tensors, output_tensors, frozen_func = self._freeze_concrete_function()\n    graph_def = self._optimize_tf_model(graph_def, input_tensors, output_tensors, frozen_func)\n    return super(TFLiteFrozenGraphConverterV2, self).convert(graph_def, input_tensors, output_tensors)",
    "docstring": "Converts a TensorFlow GraphDef based on instance variables. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:convert arg:self arguments arg If Assign Call If Return return:yes Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "update_resharding_callback",
    "source_code": "def update_resharding_callback(self, callback: checkpoint_adapter.ReshardCallback):\n    if not issubclass(checkpoint_adapter.ReshardCallback, type(self.callback)):\n        raise TypeError('Cannot override resharding callback, already set to non trivial.')\n    self.callback = callback",
    "docstring": "Add a resharding callback to the checkpoint. This will be applied to the checkpoint value before being supplied to the restore ops. Args: callback: Reshard callback for resharding this checkpoint position. Maybe None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:update_resharding_callback arg:self arg:callback arguments arg arg If Call Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "read_metadata",
    "source_code": "@abc.abstractmethod\ndef read_metadata(self) -> Metadata:\n    pass",
    "docstring": "Read the checkpoint metadata. Returns: The metadata object associated with the checkpoint being loaded.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:read_metadata arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_beta_loss_to_float",
    "source_code": "def _beta_loss_to_float(beta_loss):\n    beta_loss_map = {'frobenius': 2, 'kullback-leibler': 1, 'itakura-saito': 0}\n    if isinstance(beta_loss, str):\n        beta_loss = beta_loss_map[beta_loss]\n    return beta_loss",
    "docstring": "Convert string beta_loss to float.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py",
    "ast_data": "FunctionDef name:_beta_loss_to_float arg:beta_loss arguments arg Assign If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_mutable",
    "source_code": "def is_mutable(self):\n    return not self.is_immutable()",
    "docstring": "Whether Dynamo allows mutation on this variable.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:is_mutable arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "escapejs",
    "source_code": "@keep_lazy(SafeString)\ndef escapejs(value):\n    return mark_safe(str(value).translate(_js_escapes))",
    "docstring": "Hex encode characters for use in JavaScript strings.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:escapejs arg:value arguments arg Return return:yes Call Call Call Call"
  },
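A short usage example; this helper typically needs no Django settings configuration since it only translates characters:

from django.utils.html import escapejs

# Characters meaningful in JS string literals are hex-escaped.
print(escapejs("hello'<b>"))  # hello\u0027\u003Cb\u003E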
  {
    "library": "tensorflow",
    "name": "put_operand",
    "source_code": "def put_operand(stuff, index, sort, operand, aggregation):\n    if sort is None:\n        stuff[index] = _LiteSingleOperand(operand)\n    else:\n        if index not in stuff:\n            stuff[index] = _LiteAggregateOperand(aggregation)\n        stuff[index].add(sort, operand)",
    "docstring": "Add a given index into the function structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:put_operand arg:stuff arg:index arg:sort arg:operand arg:aggregation arguments arg arg arg arg arg If Compare Assign Call If Compare Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "rec_drop_fields",
    "source_code": "@array_function_dispatch(_rec_drop_fields_dispatcher)\ndef rec_drop_fields(base, drop_names):\n    return drop_fields(base, drop_names, usemask=False, asrecarray=True)",
    "docstring": "Returns a new numpy.recarray with fields in dropped.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\recfunctions.py",
    "ast_data": "FunctionDef name:rec_drop_fields arg:base arg:drop_names arguments arg arg Return return:yes Call Call"
  },
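A small usage example of this public recfunctions helper:

import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 2.0), (3, 4.0)], dtype=[("x", int), ("y", float)])
r = rfn.rec_drop_fields(a, ["y"])
# asrecarray=True, so remaining fields are attribute-accessible.
print(r.x, r.dtype.names)  # [1 3] ('x',)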
  {
    "library": "numpy",
    "name": "_findfile",
    "source_code": "def _findfile(self, path):\n    if not self._isurl(path):\n        filelist = self._possible_names(path)\n        filelist += self._possible_names(self.abspath(path))\n    else:\n        filelist = self._possible_names(self.abspath(path))\n        filelist = filelist + self._possible_names(path)\n    for name in filelist:\n        if self.exists(name):\n            if self._isurl(name):\n                name = self._cache(name)\n            return name\n    return None",
    "docstring": "Searches for `` and returns full path if found. If path is an URL, _findfile will cache a local copy and return the path to the cached file. If path is a local file, _findfile will return a path to that local file. The search will include possible compressed versions of the file and return the first occurrence found.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_findfile arg:self arg:path arguments arg arg If Call Assign Call Call Call Assign Call Call Assign Call For If Call If Call Assign Call Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "clear_executor_errors",
    "source_code": "def clear_executor_errors(self):\n    if self._context_handle:\n        pywrap_tfe.TFE_ContextClearExecutors(self._context_handle)\n    else:\n        raise ValueError('Context is not initialized.')",
    "docstring": "Clear errors in both local executors and remote workers. After receiving errors from remote workers, additional requests on the fly could further taint the status on the remote workers due to the async nature of remote execution. Calling this method block on waiting for all pending nodes in remote executors to finish and clear their error statuses. Raises: ValueError: if context is not initialized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:clear_executor_errors arg:self arguments arg If Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_dataset",
    "source_code": "@tf_export('data.experimental.parse_example_dataset')\n@deprecation.deprecated(None, 'Use `tf.data.Dataset.map(tf.io.parse_example(...))` instead.')\ndef parse_example_dataset(features, num_parallel_calls=1, deterministic=None):\n    if features is None:\n        raise ValueError('Argument `features` is required, but not specified.')\n\n    def _apply_fn(dataset):\n        out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls, deterministic)\n        if any((isinstance(feature, parsing_ops.SparseFeature) or isinstance(feature, parsing_ops.RaggedFeature) for feature in features.values())):\n            out_dataset = out_dataset.map(lambda x: parsing_ops._construct_tensors_for_composite_features(features, x), num_parallel_calls=num_parallel_calls)\n        return out_dataset\n    return _apply_fn",
    "docstring": "A transformation that parses protos into a of tensors. Parses a number of serialized protos given in . We refer to as a batch with many entries of individual protos. This op parses serialized examples into a dictionary mapping keys to , , and objects. is a dict from keys to , , , and objects. Each and is mapped to a ; each is mapped to a ; and each is mapped to a . See for more details about feature dictionaries. Args: features: A mapping feature keys to , , , and values. num_parallel_calls: (Optional.) A scalar , representing the number of parsing processes to call in parallel. deterministic: (Optional.) A boolean controlling whether determinism should be traded for performance by allowing elements to be produced out of order if some parsing calls complete faster than others. If is , the dataset option ( by default) is used to decide whether to produce elements deterministically. Returns: A dataset transformation function, which can be passed to . Raises: ValueError: if features argument is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\parsing_ops.py",
    "ast_data": "FunctionDef name:parse_example_dataset arg:features arg:num_parallel_calls arg:deterministic arguments arg arg arg If Compare Raise Call FunctionDef name:_apply_fn arg:dataset arguments arg Assign Call If Call BoolOp Call Call Call Assign Call arguments arg Call Return return:yes Return return:yes Call Call"
  },
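Per the deprecation notice on this function, the recommended replacement is a plain map; a sketch with a hypothetical TFRecord file and feature spec (both names are placeholders, not from the record above):

import tensorflow as tf

features = {"x": tf.io.FixedLenFeature([], tf.int64)}  # hypothetical spec
ds = tf.data.TFRecordDataset(["data.tfrecord"])        # hypothetical file
# Equivalent of parse_example_dataset via the non-deprecated API.
ds = ds.batch(8).map(lambda s: tf.io.parse_example(s, features))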
  {
    "library": "tensorflow",
    "name": "complex",
    "source_code": "@tf_export('dtypes.complex', 'complex')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef complex(real, imag, name=None):\n    real = ops.convert_to_tensor(real, name='real')\n    imag = ops.convert_to_tensor(imag, name='imag')\n    with ops.name_scope(name, 'Complex', [real, imag]) as name:\n        input_types = (real.dtype, imag.dtype)\n        if input_types == (dtypes.float64, dtypes.float64):\n            Tout = dtypes.complex128\n        elif input_types == (dtypes.float32, dtypes.float32):\n            Tout = dtypes.complex64\n        else:\n            raise TypeError(f'The `real` and `imag` components have incorrect types: {real.dtype.name} {imag.dtype.name}. They must be consistent, and one of {[dtypes.float32, dtypes.float64]}')\n        return gen_math_ops._complex(real, imag, Tout=Tout, name=name)",
    "docstring": "Converts two real numbers to a complex number. Given a tensor representing the real part of a complex number, and a tensor representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where *a* represents the part and *b* represents the part. The input tensors and must have the same shape. For example: Args: real: A . Must be one of the following types: , . imag: A . Must have the same type as . name: A name for the operation (optional). Returns: A of type or . Raises: TypeError: Real and imag must be correct types",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:complex arg:real arg:imag arg:name arguments arg arg arg Assign Call Assign Call With Call Assign If Compare Assign If Compare Assign Raise Call Return return:yes Call Call"
  },
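A runnable example of the dtype rule implemented above:

import tensorflow as tf

real = tf.constant([2.25, 3.25])  # float32
imag = tf.constant([4.75, 5.75])
# float32 inputs yield complex64; float64 inputs would yield complex128.
print(tf.complex(real, imag))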
  {
    "library": "matplotlib",
    "name": "set_data",
    "source_code": "def set_data(self, pts, y=None):\n    if y is not None:\n        x = pts\n        pts = np.array([x, y])\n    self._markers.set_data(pts)",
    "docstring": "Set x and y positions of handles.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:set_data arg:self arg:pts arg:y arguments arg arg arg If Compare Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "bijector",
    "source_code": "@property\ndef bijector(self):\n    return self._bijector",
    "docstring": "Function transforming x => y.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:bijector arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "x",
    "source_code": "@property\ndef x(self) -> Tensor:\n    return self.keypoints[:, 0]",
    "docstring": "Accesses the x coordinates of keypoints (along image width).",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\structs.py",
    "ast_data": "FunctionDef name:x arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "evaluate",
    "source_code": "def evaluate(self) -> object:\n    if not self._is_aligned:\n        self.result_type, self.aligned_axes, self.result_name = align_terms(self.expr.terms)\n    res = self._evaluate()\n    return reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type, self.result_name)",
    "docstring": "Run the engine on the expression. This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. Returns ------- object The result of the passed expression.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\engines.py",
    "ast_data": "FunctionDef name:evaluate arg:self arguments arg If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_to_tensor_v1",
    "source_code": "def convert_to_tensor_v1(value, dtype=None, name=None, preferred_dtype=None, dtype_hint=None) -> tensor_lib.Tensor:\n    preferred_dtype = deprecation.deprecated_argument_lookup('dtype_hint', dtype_hint, 'preferred_dtype', preferred_dtype)\n    return convert_to_tensor_v2(value, dtype, preferred_dtype, name)",
    "docstring": "Converts the given to a (with the TF1 API).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_conversion.py",
    "ast_data": "FunctionDef name:convert_to_tensor_v1 arg:value arg:dtype arg:name arg:preferred_dtype arg:dtype_hint arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_antialiased",
    "source_code": "def set_antialiased(self, antialiased):\n    self._antialiased = antialiased\n    self.stale = True",
    "docstring": "Set whether to use antialiased rendering. Parameters ---------- antialiased : bool Notes ----- Antialiasing will be determined by :rc: and the parameter *antialiased* will have no effect if the text contains math expressions.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_antialiased arg:self arg:antialiased arguments arg arg Assign Assign"
  },
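A usage sketch, assuming a matplotlib version where Text.set_antialiased is public (3.4+):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
t = ax.text(0.5, 0.5, "hello")
# Overrides the rcParams["text.antialiased"] default for this Text only.
t.set_antialiased(False)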
  {
    "library": "numpy",
    "name": "compare",
    "source_code": "def compare(all_dict, others, names, module_name):\n    only_all = set()\n    for name in all_dict:\n        if name not in names:\n            for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:\n                if re.match(pat, module_name + '.' + name):\n                    break\n            else:\n                only_all.add(name)\n    only_ref = set()\n    missing = set()\n    for name in names:\n        if name not in all_dict:\n            for pat in REFGUIDE_ALL_SKIPLIST:\n                if re.match(pat, module_name + '.' + name):\n                    if name not in others:\n                        missing.add(name)\n                    break\n            else:\n                only_ref.add(name)\n    return (only_all, only_ref, missing)",
    "docstring": "Return sets of objects from all_dict. Will return three sets: {in module_name.__all__}, {in REFGUIDE*}, and {missing from others} Parameters ---------- all_dict : list List of non deprecated sub modules for module_name others : list List of sub modules for module_name names : set Set of function names or special directives present in docstring of module_name module_name : ModuleType Returns ------- only_all : set only_ref : set missing : set",
    "type": "function",
    "file_path": "numpy\\tools\\refguide_check.py",
    "ast_data": "FunctionDef name:compare arg:all_dict arg:others arg:names arg:module_name arguments arg arg arg arg Assign Call For If Compare For If Call Call Assign Call Assign Call For If Compare For If Call If Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_columns",
    "source_code": "@abstractmethod\ndef get_columns(self) -> Iterable[Column]:\n    pass",
    "docstring": "Return an iterator yielding the columns.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:get_columns arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "flat_captures",
    "source_code": "@property\ndef flat_captures(self) -> List[trace.TraceType]:\n    if not hasattr(self, '_cached_flat_captures'):\n        cached_flat_captures = []\n        for t in self.captures.values():\n            cached_flat_captures.extend(t.flatten())\n        self._cached_flat_captures = cached_flat_captures\n    return self._cached_flat_captures",
    "docstring": "Flat tensor captures needed by this FunctionType.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:flat_captures arg:self arguments arg If Call Assign For Call Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_intersection",
    "source_code": "def _intersection(self, other, sort: bool=False):\n    if self.left.is_unique and self.right.is_unique:\n        taken = self._intersection_unique(other)\n    elif other.left.is_unique and other.right.is_unique and (self.isna().sum() <= 1):\n        taken = other._intersection_unique(self)\n    else:\n        taken = self._intersection_non_unique(other)\n    if sort:\n        taken = taken.sort_values()\n    return taken",
    "docstring": "intersection specialized to the case with matching dtypes.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_intersection arg:self arg:other arg:sort arguments arg arg arg If BoolOp Assign Call If BoolOp Compare Call Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cluster_spec",
    "source_code": "def cluster_spec(self):\n    task_list = []\n    self._gpu_allocation = []\n    self._cluster_allocation = {}\n    for host, num_tasks in sorted(self._task_configuration.items()):\n        for port_offset, gpu_offset in zip(range(num_tasks), range(0, self._gpus_per_node, self._gpus_per_task)):\n            host_addr = '%s:%d' % (host, self._port_base + port_offset)\n            task_list.append(host_addr)\n            gpu_id_list = []\n            for gpu_id in range(gpu_offset, gpu_offset + self._gpus_per_task):\n                gpu_id_list.append(str(gpu_id))\n            self._gpu_allocation.append(','.join(gpu_id_list))\n    cluster_rank_offset_start = 0\n    cluster_rank_offset_end = 0\n    for task_type, num_tasks in sorted(self._jobs.items()):\n        cluster_rank_offset_end = cluster_rank_offset_start + num_tasks\n        self._cluster_allocation[task_type] = task_list[cluster_rank_offset_start:cluster_rank_offset_end]\n        if cluster_rank_offset_start <= self._rank < cluster_rank_offset_end:\n            self.task_type = task_type\n            self.task_id = self._rank - cluster_rank_offset_start\n        cluster_rank_offset_start = cluster_rank_offset_end\n    if self._auto_set_gpu:\n        os.environ['CUDA_VISIBLE_DEVICES'] = self._gpu_allocation[self._rank]\n    return ClusterSpec(self._cluster_allocation)",
    "docstring": "Returns a ClusterSpec object based on the latest instance group info. This returns a ClusterSpec object for use based on information from the specified initialization parameters and Slurm environment variables. The cluster specification is resolved each time this function is called. The resolver extract hostnames of nodes by scontrol and pack tasks in that order until a node a has number of tasks that is equal to specification. GPUs on nodes are allocated to tasks by specification through setting CUDA_VISIBLE_DEVICES environment variable. Returns: A ClusterSpec containing host information retrieved from Slurm's environment variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\slurm_cluster_resolver.py",
    "ast_data": "FunctionDef name:cluster_spec arg:self arguments arg Assign Assign Assign For Call Call For Call Call Call Assign Call Assign For Call Call Call Call Call Assign Assign For Call Call Assign Assign If Compare Assign Assign Assign If Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_run_between_graph_client",
    "source_code": "def _run_between_graph_client(worker_fn, strategy, eval_fn, eval_strategy, cluster_spec, session_config, rpc_layer):\n    coord = coordinator.Coordinator()\n    eval_thread = None\n    if _TaskType.EVALUATOR in cluster_spec.jobs:\n        eval_thread = threading.Thread(target=_run_single_worker, args=(eval_fn, eval_strategy, cluster_spec, _TaskType.EVALUATOR, 0, session_config), kwargs={'rpc_layer': rpc_layer, 'coord': coord})\n        eval_thread.start()\n    threads = []\n    worker_barrier = _Barrier(_get_num_workers(cluster_spec))\n    for task_type in [_TaskType.CHIEF, _TaskType.WORKER]:\n        for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):\n            t = threading.Thread(target=_run_single_worker, args=(worker_fn, strategy, cluster_spec, task_type, task_id, session_config), kwargs={'rpc_layer': rpc_layer, 'worker_barrier': worker_barrier, 'coord': coord})\n            t.start()\n            threads.append(t)\n    if eval_thread:\n        threads_to_join = threads + [eval_thread]\n    else:\n        threads_to_join = threads\n    coord.join(threads_to_join)\n    return None",
    "docstring": "Runs a standalone client for between-graph replication.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_run_between_graph_client arg:worker_fn arg:strategy arg:eval_fn arg:eval_strategy arg:cluster_spec arg:session_config arg:rpc_layer arguments arg arg arg arg arg arg arg Assign Call Assign If Compare Assign Call Call Assign Assign Call Call For For Call Call Call Call Assign Call Call Call If Assign Assign Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "_init_line",
    "source_code": "def _init_line(self):\n    tran = self._axis_artist_helper.get_line_transform(self.axes) + self.offset_transform\n    axisline_style = self.get_axisline_style()\n    if axisline_style is None:\n        self.line = PathPatch(self._axis_artist_helper.get_line(self.axes), color=mpl.rcParams['axes.edgecolor'], fill=False, linewidth=mpl.rcParams['axes.linewidth'], capstyle=mpl.rcParams['lines.solid_capstyle'], joinstyle=mpl.rcParams['lines.solid_joinstyle'], transform=tran)\n    else:\n        self.line = axisline_style(self, transform=tran)",
    "docstring": "Initialize the *line* artist that is responsible to draw the axis line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:_init_line arg:self arguments arg Assign Call Assign Call If Compare Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "extern",
    "source_code": "def extern(self, include: 'GlobPattern', *, exclude: 'GlobPattern'=(), allow_empty: bool=True):\n    self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo(_ModuleProviderAction.EXTERN, allow_empty)",
    "docstring": "Include `mockclose`, no such exception is thrown.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:extern arg:self arg:include arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "symmetric_kl_divergence",
    "source_code": "def symmetric_kl_divergence(predicted, actual):\n    epsilon = tf.constant(1e-07, dtype=tf.float32, name='epsilon')\n    p = tf.math.maximum(predicted, epsilon)\n    q = tf.math.maximum(actual, epsilon)\n    kld_1 = tf.math.reduce_sum(tf.math.multiply(p, tf.math.log(tf.math.divide(p, q))))\n    kld_2 = tf.math.reduce_sum(tf.math.multiply(q, tf.math.log(tf.math.divide(q, p))))\n    return tf.add(kld_1, kld_2)",
    "docstring": "Calculate symmetric KL-divergence over two classification tensors. Note that here the classifications do not form a probability distribution. They are, however normalized to 0..1 and calculating a KL-divergence over them gives reasonable numerical results. Shape of the two inputs must be the same at inference time but is otherwise unconstrained. Args: predicted: classification outputs from model actual: golden classification outputs Returns: Single scalar tensor with symmetric KL-divergence between predicted and actual.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\experimental\\acceleration\\mini_benchmark\\metrics\\kl_divergence.py",
    "ast_data": "FunctionDef name:symmetric_kl_divergence arg:predicted arg:actual arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Return return:yes Call"
  },
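A minimal NumPy sketch of the same computation, assuming only that the inputs are non-negative arrays of matching shape; EPSILON mirrors the tf.constant guard above:

```python
import numpy as np

EPSILON = 1e-7  # keeps log() finite when an entry is exactly zero

def symmetric_kl(predicted, actual):
    # Clamp both tensors away from zero, then sum KL(p||q) + KL(q||p).
    p = np.maximum(predicted, EPSILON)
    q = np.maximum(actual, EPSILON)
    return np.sum(p * np.log(p / q)) + np.sum(q * np.log(q / p))

p = np.array([0.9, 0.1, 0.0])
q = np.array([0.8, 0.1, 0.1])
print(symmetric_kl(p, q))  # ~1.39: the zero entry dominates via log(1e-7/0.1)
print(symmetric_kl(p, p))  # 0.0: a tensor has zero divergence from itself
```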
  {
    "library": "pytorch",
    "name": "_create_batch_matrices",
    "source_code": "def _create_batch_matrices(m: int, n: int, k: int, b: int, lda: int, ldb: int, ldc: int, transA: bool, transB: bool, dtype: torch.dtype, deviceid: str, subMatrix: bool=False) -> tuple[torch.Tensor, torch.Tensor]:\n    if subMatrix:\n        rowsA = rowsB = max(ldc, k)\n        matA = torch.randn(b, rowsA, lda, dtype=dtype, device=deviceid)\n        matB = torch.randn(b, rowsB, ldb, dtype=dtype, device=deviceid)\n        subA = matA[:b, :k, :m].transpose(1, 2) if transA else matA[:b, :m, :k]\n        subB = matB[:b, :n, :k].transpose(1, 2) if transB else matB[:b, :k, :n]\n        return (subA, subB)\n    else:\n        matA = torch.rand(b, k, m, dtype=dtype, device=deviceid) if transA else torch.rand(b, m, k, dtype=dtype, device=deviceid)\n        matB = torch.rand(b, n, k, dtype=dtype, device=deviceid) if transB else torch.rand(b, k, n, dtype=dtype, device=deviceid)\n        matA = matA.transpose(1, 2) if transA else matA\n        matB = matB.transpose(1, 2) if transB else matB\n        return (matA, matB)",
    "docstring": "Helper function for _process_single_offline_gemm. Creates batch matrices that are then consumed by one of the Torch GEMM APIs. Similar to _create_matrices but for 3D batch matrices.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:_create_batch_matrices arg:m arg:n arg:k arg:b arg:lda arg:ldb arg:ldc arg:transA arg:transB arg:dtype arg:deviceid arg:subMatrix arguments arg arg arg arg arg arg arg arg arg arg arg arg If Assign Call Assign Call Assign Call Assign Call Assign Call Return return:yes Assign Call Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_DeprecationHelperStr",
    "source_code": "class _DeprecationHelperStr:\n\n    def __init__(self, content, message):\n        self._content = content\n        self._message = message\n\n    def __hash__(self):\n        return hash(self._content)\n\n    def __eq__(self, other):\n        res = self._content == other\n        if res:\n            warnings.warn(self._message, category=DeprecationWarning, stacklevel=2)\n        return res",
    "docstring": "Helper class used by deprecate_cython_api",
    "type": "class",
    "file_path": "scipy\\scipy\\_lib\\deprecation.py",
    "ast_data": "ClassDef name:_DeprecationHelperStr FunctionDef name:__init__ arg:self arg:content arg:message arguments arg arg arg Assign Assign FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Assign Compare If Call Return return:yes"
  },
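A usage sketch of the helper above: hashing delegates to the wrapped string, so the helper can stand in for a plain-string dict key, and the DeprecationWarning fires from __eq__ during lookup (roughly how deprecate_cython_api patches a Cython module's API table):

```python
import warnings

msg = "old_fn is deprecated, use new_fn instead"
table = {_DeprecationHelperStr("old_fn", msg): 42}

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    value = table["old_fn"]  # hash matches, then __eq__ emits the warning
print(value, [str(w.message) for w in caught])
# 42 ['old_fn is deprecated, use new_fn instead']
```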
  {
    "library": "tensorflow",
    "name": "slice_inputs",
    "source_code": "def slice_inputs(self, indices_dataset, inputs):\n    dataset = dataset_ops.DatasetV2.zip((indices_dataset, dataset_ops.DatasetV2.from_tensors(inputs).repeat()))\n\n    def grab_batch(i, data):\n        return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)\n    dataset = dataset.map(grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)\n    options = options_lib.Options()\n    options.experimental_optimization.apply_default_optimizations = False\n    if self._shuffle:\n        options.experimental_external_state_policy = options_lib.ExternalStatePolicy.IGNORE\n    dataset = dataset.with_options(options)\n    return dataset",
    "docstring": "Slice inputs into a Dataset of batches. Given a Dataset of batch indices and the unsliced inputs, this step slices the inputs in a parallelized fashion and produces a dataset of input batches. Args: indices_dataset: A Dataset of batched indices inputs: A python data structure that contains the inputs, targets, and possibly sample weights. Returns: A Dataset of input batches matching the batch indices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:slice_inputs arg:self arg:indices_dataset arg:inputs arguments arg arg arg Assign Call Call Call FunctionDef name:grab_batch arg:i arg:data arguments arg arg Return return:yes Call arguments arg Call Assign Call Assign Call Assign If Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "gdal_version",
    "source_code": "def gdal_version():\n    return _version_info(b'RELEASE_NAME')",
    "docstring": "Return only the GDAL version number information.",
    "type": "function",
    "file_path": "django\\django\\contrib\\gis\\gdal\\libgdal.py",
    "ast_data": "FunctionDef name:gdal_version arguments Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "check_parent_directory",
    "source_code": "def check_parent_directory(path: Path | str) -> None:\n    parent = Path(path).parent\n    if not parent.is_dir():\n        raise OSError(f\"Cannot save file into a non-existent directory: '{parent}'\")",
    "docstring": "Check if parent directory of a file exists, raise OSError if it does not Parameters ---------- path: Path or str Path to check parent directory of",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:check_parent_directory arg:path arguments arg Assign Call If Call Raise Call"
  },
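A short usage sketch with a hypothetical path, assuming check_parent_directory is in scope:

```python
try:
    check_parent_directory("/no/such/dir/out.csv")
except OSError as err:
    print(err)  # Cannot save file into a non-existent directory: '/no/such/dir'
```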
  {
    "library": "matplotlib",
    "name": "set_clip_box",
    "source_code": "def set_clip_box(self, clipbox):\n    _api.check_isinstance((BboxBase, None), clipbox=clipbox)\n    if clipbox != self.clipbox:\n        self.clipbox = clipbox\n        self.pchanged()\n        self.stale = True",
    "docstring": "Set the artist's clip . Parameters ---------- clipbox : or None Will typically be created from a . For instance, `` is the default clipping for an artist added to an Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:set_clip_box arg:self arg:clipbox arguments arg arg Call If Compare Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, value):\n    if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n        raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n    self._value = value",
    "docstring": "Constructor for . Args: value: a float giving the predicted values. Required. Raises: ValueError: if the value is not a with dtype tf.float32.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:value arguments arg arg If BoolOp Call Raise Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_quantized_conv_bn_example_inputs_kwargs",
    "source_code": "def _get_quantized_conv_bn_example_inputs_kwargs(is_per_channel: bool, has_bias: bool, bias_is_quantized: bool, is_cuda: bool) -> dict[str, Any]:\n    kwargs = {}\n    if is_per_channel:\n        kwargs['weight_scale'] = torch.tensor([1], dtype=torch.float)\n        kwargs['weight_zero_point'] = torch.tensor([0], dtype=torch.int)\n        if has_bias and bias_is_quantized:\n            kwargs['bias_scale'] = torch.tensor([1], dtype=torch.float)\n            kwargs['bias_zero_point'] = torch.tensor([0], dtype=torch.int)\n    if has_bias:\n        kwargs['conv_bias'] = torch.randn(1)\n    if is_cuda:\n        for k, v in kwargs.items():\n            if isinstance(v, torch.Tensor):\n                kwargs[k] = v.cuda()\n    return kwargs",
    "docstring": "Optional example inputs for quantized and folded conv-bn patterns used in convert, expressed as kwargs.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_get_quantized_conv_bn_example_inputs_kwargs arg:is_per_channel arg:has_bias arg:bias_is_quantized arg:is_cuda arguments arg arg arg arg Assign If Assign Call Assign Call If BoolOp Assign Call Assign Call If Assign Call If For Call If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "watch_key",
    "source_code": "@property\ndef watch_key(self):\n    return _get_tensor_watch_key(self.node_name, self.output_slot, self.debug_op)",
    "docstring": "Watch key identities a debug watch on a tensor. Returns: () A watch key, in the form of :.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:watch_key arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "DjangoRangeDumper",
    "source_code": "class DjangoRangeDumper(RangeDumper):\n\n    def upgrade(self, obj, format):\n        dumper = super().upgrade(obj, format)\n        if dumper is not self and dumper.oid == TSRANGE_OID:\n            dumper.oid = TSTZRANGE_OID\n        return dumper",
    "docstring": "A Range dumper customized for Django.",
    "type": "class",
    "file_path": "django\\django\\db\\backends\\postgresql\\psycopg_any.py",
    "ast_data": "ClassDef name:DjangoRangeDumper FunctionDef name:upgrade arg:self arg:obj arg:format arguments arg arg arg Assign Call Call If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__from_arrow__",
    "source_code": "def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray:\n    import pyarrow\n    from pandas.core.arrays import PeriodArray\n    from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask\n    if isinstance(array, pyarrow.Array):\n        chunks = [array]\n    else:\n        chunks = array.chunks\n    results = []\n    for arr in chunks:\n        data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64))\n        parr = PeriodArray(data.copy(), dtype=self, copy=False)\n        parr[~mask] = NaT\n        results.append(parr)\n    if not results:\n        return PeriodArray(np.array([], dtype='int64'), dtype=self, copy=False)\n    return PeriodArray._concat_same_type(results)",
    "docstring": "Construct PeriodArray from pyarrow Array/ChunkedArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:__from_arrow__ arg:self arg:array arguments arg arg If Call Assign Assign Assign For Assign Call Call Assign Call Call Assign Call If Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "ProjectionRegistry",
    "source_code": "class ProjectionRegistry:\n\n    def __init__(self):\n        self._all_projection_types = {}\n\n    def register(self, *projections):\n        for projection in projections:\n            name = projection.name\n            self._all_projection_types[name] = projection\n\n    def get_projection_class(self, name):\n        return self._all_projection_types[name]\n\n    def get_projection_names(self):\n        return sorted(self._all_projection_types)",
    "docstring": "A mapping of registered projection names to projection classes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\__init__.py",
    "ast_data": "ClassDef name:ProjectionRegistry FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:register arg:self arguments arg arg For Assign Assign FunctionDef name:get_projection_class arg:self arg:name arguments arg arg Return return:yes FunctionDef name:get_projection_names arg:self arguments arg Return return:yes Call"
  },
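A usage sketch with a hypothetical projection class; in real matplotlib the registered objects are Axes subclasses whose `name` class attribute serves as the lookup key:

```python
class HammerAxes:
    name = "custom_hammer"  # the key under which the class is registered

registry = ProjectionRegistry()
registry.register(HammerAxes)
print(registry.get_projection_names())                 # ['custom_hammer']
print(registry.get_projection_class("custom_hammer"))  # <class '...HammerAxes'>
```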
  {
    "library": "django",
    "name": "_get_target_ids",
    "source_code": "def _get_target_ids(self, target_field_name, objs):\n    from django.db.models import Model\n    target_ids = set()\n    target_field = self.through._meta.get_field(target_field_name)\n    for obj in objs:\n        if isinstance(obj, self.model):\n            if not router.allow_relation(obj, self.instance):\n                raise ValueError('Cannot add \"%r\": instance is on database \"%s\", value is on database \"%s\"' % (obj, self.instance._state.db, obj._state.db))\n            target_id = target_field.get_foreign_related_value(obj)[0]\n            if target_id is None:\n                raise ValueError('Cannot add \"%r\": the value for field \"%s\" is None' % (obj, target_field_name))\n            target_ids.add(target_id)\n        elif isinstance(obj, Model):\n            raise TypeError(\"'%s' instance expected, got %r\" % (self.model._meta.object_name, obj))\n        else:\n            target_ids.add(target_field.get_prep_value(obj))\n    return target_ids",
    "docstring": "Return the set of ids of that the target field references.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py",
    "ast_data": "FunctionDef name:_get_target_ids arg:self arg:target_field_name arg:objs arguments arg arg arg Assign Call Assign Call For If Call If Call Raise Call Assign Call If Compare Raise Call Call If Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "@abc.abstractmethod\ndef master(self, task_type=None, task_id=None, rpc_layer=None):\n    raise NotImplementedError()",
    "docstring": "Retrieves the name or URL of the session master. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master. Implementors of this function must take care in ensuring that the master returned is up-to-date at the time to calling this function. This usually means retrieving the master every time this function is invoked.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pandas",
    "name": "nbytes",
    "source_code": "@cache_readonly\ndef nbytes(self) -> int:\n    rng = self._range\n    return getsizeof(rng) + sum((getsizeof(getattr(rng, attr_name)) for attr_name in ['start', 'stop', 'step']))",
    "docstring": "Return the number of bytes in the underlying data.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:nbytes arg:self arguments arg Assign Return return:yes Call Call Call Call"
  },
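Because only start, stop, and step are stored, nbytes is constant in the index length; a quick sketch of the contrast with a materialized integer index:

```python
import numpy as np
import pandas as pd

# RangeIndex: footprint does not grow with the number of elements.
print(pd.RangeIndex(10).nbytes == pd.RangeIndex(10_000_000).nbytes)  # True

# A materialized int64 index pays 8 bytes per element instead.
print(pd.Index(np.arange(10_000_000)).nbytes)  # 80000000
```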
  {
    "library": "scipy",
    "name": "_update_bracket",
    "source_code": "def _update_bracket(ab, fab, c, fc):\n    fa, fb = fab\n    idx = 0 if np.sign(fa) * np.sign(fc) > 0 else 1\n    rx, rfx = (ab[idx], fab[idx])\n    fab[idx] = fc\n    ab[idx] = c\n    return (rx, rfx)",
    "docstring": "Update a bracket given (c, fc), return the discarded endpoints.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_update_bracket arg:ab arg:fab arg:c arg:fc arguments arg arg arg arg Assign Assign Compare Call Call Assign Assign Assign Return return:yes"
  },
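A worked example of the bracket update for f(x) = x**2 - 2 on [1, 2], assuming _update_bracket is in scope:

```python
f = lambda x: x * x - 2.0
ab, fab = [1.0, 2.0], [f(1.0), f(2.0)]  # f(1) = -1 < 0 < f(2) = 2

c = 1.5
discarded = _update_bracket(ab, fab, c, f(c))
# f(c) = 0.25 shares its sign with f(2), so the upper endpoint is replaced.
print(ab)         # [1.0, 1.5] -- the root sqrt(2) is still bracketed
print(discarded)  # (2.0, 2.0) -- the discarded endpoint and its f-value
```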
  {
    "library": "pytorch",
    "name": "sum_tensors",
    "source_code": "@staticmethod\ndef sum_tensors(arg: Any) -> int:\n    total_memory = 0\n\n    def sum_bytes(t: torch.Tensor) -> None:\n        nonlocal total_memory\n        total_memory += t.untyped_storage().nbytes()\n    tree_map_only(torch.Tensor, sum_bytes, arg)\n    return total_memory",
    "docstring": "Calculate total memory consumed by the tensors in the argument.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\fake_collectives.py",
    "ast_data": "FunctionDef name:sum_tensors arg:arg arguments arg Assign FunctionDef name:sum_bytes arg:t arguments arg Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "laplacian_1d",
    "source_code": "def laplacian_1d(window_size: int, *, device: Optional[Device]=None, dtype: Dtype=torch.float32) -> Tensor:\n    filter_1d = torch.ones(window_size, device=device, dtype=dtype)\n    middle = window_size // 2\n    filter_1d[middle] = 1 - window_size\n    return filter_1d",
    "docstring": "One could also use the Laplacian of Gaussian formula to design the filter.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:laplacian_1d arg:window_size arguments arg arg arg Assign Call Assign Assign Return return:yes"
  },
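A quick check of the kernel's structure, assuming laplacian_1d is in scope: every tap is 1 except the centre, which is set so the taps sum to zero (a discrete second-derivative stencil):

```python
k = laplacian_1d(5)
print(k)        # tensor([ 1.,  1., -4.,  1.,  1.])
print(k.sum())  # tensor(0.) -- zero response on constant signals
```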
  {
    "library": "pytorch",
    "name": "is_sharded",
    "source_code": "def is_sharded(self) -> bool:\n    return any((placement.is_shard() for placement in self.placements))",
    "docstring": "return True if the current DTensorSpec is sharded on any mesh dims (devices)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py",
    "ast_data": "FunctionDef name:is_sharded arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "is_numpy_array",
    "source_code": "def is_numpy_array(x: object) -> TypeGuard[npt.NDArray[Any]]:\n    cls = cast(Hashable, type(x))\n    return (_issubclass_fast(cls, 'numpy', 'ndarray') or _issubclass_fast(cls, 'numpy', 'generic')) and (not _is_jax_zero_gradient_array(x))",
    "docstring": "Return True if is a NumPy array. This function does not import NumPy if it has not already been imported and is therefore cheap to use. This also returns True for subclasses and NumPy scalar objects. See Also -------- array_namespace is_array_api_obj is_cupy_array is_torch_array is_ndonnx_array is_dask_array is_jax_array is_pydata_sparse_array",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py",
    "ast_data": "FunctionDef name:is_numpy_array arg:x arguments arg Assign Call Call Return return:yes BoolOp BoolOp Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_scramble",
    "source_code": "def _scramble(self) -> None:\n    self._shift = np.dot(rng_integers(self.rng, 2, size=(self.d, self.bits), dtype=self.dtype_i), 2 ** np.arange(self.bits, dtype=self.dtype_i))\n    ltm = np.tril(rng_integers(self.rng, 2, size=(self.d, self.bits, self.bits), dtype=self.dtype_i))\n    _cscramble(dim=self.d, bits=self.bits, ltm=ltm, sv=self._sv)",
    "docstring": "Scramble the sequence using LMS+shift.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_scramble arg:self arguments arg Assign Call Call Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "DeviceWrapperBase",
    "source_code": "class DeviceWrapperBase(object):\n\n    def __init__(self, cell, device, **kwargs):\n        super(DeviceWrapperBase, self).__init__(cell, **kwargs)\n        self._device = device\n\n    @property\n    def state_size(self):\n        return self.cell.state_size\n\n    @property\n    def output_size(self):\n        return self.cell.output_size\n\n    def zero_state(self, batch_size, dtype):\n        with ops.name_scope_v2(type(self).__name__ + 'ZeroState'):\n            with ops.device(self._device):\n                return self.cell.zero_state(batch_size, dtype)\n\n    def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n        with ops.device(self._device):\n            return cell_call_fn(inputs, state, **kwargs)\n\n    def get_config(self):\n        config = {'device': self._device}\n        base_config = super(DeviceWrapperBase, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Operator that ensures an RNNCell runs on a particular device.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "ClassDef name:DeviceWrapperBase FunctionDef name:__init__ arg:self arg:cell arg:device arguments arg arg arg arg Call Call Assign FunctionDef name:state_size arg:self arguments arg Return return:yes FunctionDef name:output_size arg:self arguments arg Return return:yes FunctionDef name:zero_state arg:self arg:batch_size arg:dtype arguments arg arg arg With Call Call With Call Return return:yes Call FunctionDef name:_call_wrapped_cell arg:self arg:inputs arg:state arg:cell_call_fn arguments arg arg arg arg arg With Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "is_tensor_dim_sharded",
    "source_code": "def is_tensor_dim_sharded(spec: DTensorSpec, dim: int) -> bool:\n    return any((p.is_shard(dim) for p in spec.placements))",
    "docstring": "Return True if tensor dim is sharded.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py",
    "ast_data": "FunctionDef name:is_tensor_dim_sharded arg:spec arg:dim arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    check_is_fitted(self)\n    xp, _ = get_namespace(X)\n    X = validate_data(self, X, accept_sparse='csr', reset=False)\n    scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_\n    return xp.reshape(scores, (-1,)) if scores.ndim > 1 and scores.shape[1] == 1 else scores",
    "docstring": "Predict confidence scores for samples. The confidence score for a sample is proportional to the signed distance of that sample to the hyperplane. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the confidence scores. Returns ------- scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Confidence scores per combination. In the binary case, confidence score for where >0 means this class would be predicted.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Return return:yes BoolOp Compare Compare Call"
  },
  {
    "library": "virtualenv",
    "name": "can_create",
    "source_code": "@classmethod\ndef can_create(cls, interpreter):\n    return True",
    "docstring": "Determine if we can create a virtual environment. :param interpreter: the interpreter in question :return: `add_parser_arguments`",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\creator.py",
    "ast_data": "FunctionDef name:can_create arg:cls arg:interpreter arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_exception",
    "source_code": "def create_exception(self, source_error):\n    preferred_type = type(source_error)\n    to_ret = None\n    if preferred_type.__init__ is Exception.__init__:\n        to_ret = preferred_type(self.get_message())\n    if preferred_type in KNOWN_STRING_CONSTRUCTOR_ERRORS:\n        to_ret = preferred_type(self.get_message())\n    elif preferred_type is KeyError:\n        to_ret = MultilineMessageKeyError(self.get_message(), self.cause_message)\n    if to_ret is not None:\n        return to_ret.with_traceback(source_error.__traceback__)",
    "docstring": "Creates exception from source_error.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\error_utils.py",
    "ast_data": "FunctionDef name:create_exception arg:self arg:source_error arguments arg arg Assign Call Assign If Compare Assign Call Call If Compare Assign Call Call If Compare Assign Call Call If Compare Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox",
    "source_code": "def get_bbox(self):\n    return transforms.Bbox.from_extents(*self._convert_units())",
    "docstring": "Return the .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_bbox arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_implementation",
    "source_code": "@doc_controls.for_subclass_implementers\ndef reduce_implementation(self, reduce_op, per_replica_value, destinations, options):\n    raise NotImplementedError('_reduce method must be implemented in descendants.')",
    "docstring": "Implementation of . Overriding this method is useful for subclass implementers. Args: reduce_op: a specifying how values should be combined. per_replica_value: a , or a like object. destinations: a , a , a alike object, or a device string. It specifies the devices to reduce to. To perform an all-reduce, pass the same to and . Note that if it's a , the value is reduced to the devices of that variable, this method doesn't update the variable. options: a . See for details. Returns: A or . Raises: ValueError: if per_replica_value can't be converted to a or if destinations is not a string, or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:reduce_implementation arg:self arg:reduce_op arg:per_replica_value arg:destinations arg:options arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "capabilities",
    "source_code": "def capabilities(self):\n    return {'boolean indexing': True, 'data-dependent shapes': True}",
    "docstring": "Return a dictionary of array API library capabilities. The resulting dictionary has the following keys: - **\"boolean indexing\"**: boolean indicating whether an array library supports boolean indexing. Always `` for NumPy. See for more details. See Also -------- __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- capabilities : dict A dictionary of array API library capabilities. Examples -------- >>> info = np.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True}",
    "type": "method",
    "file_path": "numpy\\numpy\\_array_api_info.py",
    "ast_data": "FunctionDef name:capabilities arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@abc.abstractproperty\ndef parse_example_spec(self):\n    pass",
    "docstring": "Returns a parsing spec as dict. It is used for get_parsing_spec for . Returned spec is a dict from keys ('string') to , , and other supported objects. Please check documentation of for all supported spec objects. Let's say a Feature column depends on raw feature ('raw') and another (input_fc). One possible implementation of parse_example_spec is as follows:",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2_types.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    if self not in object_map:\n        op_device = pydev.DeviceSpec.from_string(self.device).replace(device_type='CPU', device_index=0).to_string()\n        with ops.device(op_device):\n            new_var = resource_variable_ops.UninitializedVariable(trainable=self.trainable, shape=self.shape, dtype=self.dtype, name=self._shared_name, distribute_strategy=self._distribute_strategy, aggregation=self._aggregation)\n        object_map[self] = new_var\n    destination_var = object_map[self]\n    with ops.device(destination_var.device):\n        destination_var.assign(self.read_value())",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare Assign Call Call Call With Call Assign Call Assign Assign With Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "isclose",
    "source_code": "def isclose(a: Array | complex, b: Array | complex, *, rtol: float=1e-05, atol: float=1e-08, equal_nan: bool=False, xp: ModuleType | None=None) -> Array:\n    xp = array_namespace(a, b) if xp is None else xp\n    if _delegate(xp, Backend.NUMPY, Backend.CUPY, Backend.DASK, Backend.JAX):\n        return xp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n    if _delegate(xp, Backend.TORCH):\n        a, b = asarrays(a, b, xp=xp)\n        return xp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n    return _funcs.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, xp=xp)",
    "docstring": "Return a boolean array where two arrays are element-wise equal within a tolerance. The tolerance values are positive, typically very small numbers. The relative difference `atolabababababatolisclosemath.iscloseabatolbatolabababisclose` is considered a numeric data-type for this purpose.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_delegation.py",
    "ast_data": "FunctionDef name:isclose arg:a arg:b arguments arg arg arg arg arg arg Assign Compare Call If Call Return return:yes Call If Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "rewrap",
    "source_code": "def rewrap(self) -> None:\n    self.measured_widths = self.colwidth[:]\n    for cell in self.cells:\n        cell.wrap(width=self.cell_width(cell, self.colwidth))\n        if not cell.wrapped:\n            continue\n        if cell.row is None or cell.col is None:\n            msg = 'Cell co-ordinates have not been set'\n            raise ValueError(msg)\n        width = math.ceil(max((column_width(x) for x in cell.wrapped)) / cell.colspan)\n        for col in range(cell.col, cell.col + cell.colspan):\n            self.measured_widths[col] = max(self.measured_widths[col], width)",
    "docstring": "Call ``).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:rewrap arg:self arguments arg Assign For Call Call If If BoolOp Compare Compare Assign Raise Call Assign Call Call Call For Call Assign Call"
  },
  {
    "library": "sphinx",
    "name": "transform_content",
    "source_code": "def transform_content(self, content_node: addnodes.desc_content) -> None:\n    field_list = nodes.field_list()\n    if 'type' in self.options:\n        field, msgs = self.format_type(self.options['type'])\n        field_list.append(field)\n        field_list += msgs\n    if 'default' in self.options:\n        field, msgs = self.format_default(self.options['default'])\n        field_list.append(field)\n        field_list += msgs\n    if len(field_list.children) > 0:\n        content_node.insert(0, field_list)",
    "docstring": "Insert *type* and *default* as a field list.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:transform_content arg:self arg:content_node arguments arg arg Assign Call If Compare Assign Call Call If Compare Assign Call Call If Compare Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@available_if(_estimator_has('predict_proba', delegates=('final_estimator_', 'final_estimator')))\ndef predict_proba(self, X):\n    check_is_fitted(self)\n    y_pred = self.final_estimator_.predict_proba(self.transform(X))\n    if isinstance(self._label_encoder, list):\n        y_pred = np.array([preds[:, 0] for preds in y_pred]).T\n    return y_pred",
    "docstring": "Predict class probabilities for using the final estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- probabilities : ndarray of shape (n_samples, n_classes) or list of ndarray of shape (n_output,) The class probabilities of the input samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Call If Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    with np.errstate(invalid='ignore'):\n        return umath.less(umath.absolute(umath.cos(x)), self.eps)",
    "docstring": "Executes the call behavior.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_TensorData",
    "source_code": "class _TensorData(collections.namedtuple('_TensorData', ['numpy', 'dtype', 'index'])):\n    __slots__ = ()\n\n    @property\n    def dtype_attr(self):\n        return attr_value_pb2.AttrValue(type=self.dtype)",
    "docstring": "Data about a tensor that was converted to a constant.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_TensorData Call Assign FunctionDef name:dtype_attr arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LazyInstanceNorm1d",
    "source_code": "class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):\n    cls_to_become = InstanceNorm1d\n\n    def _get_no_batch_dim(self):\n        return 2\n\n    def _check_input_dim(self, input):\n        if input.dim() not in (2, 3):\n            raise ValueError(f'expected 2D or 3D input (got {input.dim()}D input)')",
    "docstring": "A :class: module with lazy initialization of the `InstanceNorm1dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinC(N, C, L)(C, L)(N, C, L)(C, L)(N, C, L)(C, L)` (same shape as input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\instancenorm.py",
    "ast_data": "ClassDef name:LazyInstanceNorm1d Assign FunctionDef name:_get_no_batch_dim arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call"
  },
  {
    "library": "seaborn",
    "name": "_get_groups",
    "source_code": "def _get_groups(self, data: DataFrame) -> tuple[str | list[str], Index | MultiIndex]:\n    levels = {}\n    for var, order in self.order.items():\n        if var in data:\n            if order is None:\n                order = categorical_order(data[var])\n            levels[var] = order\n    grouper: str | list[str]\n    groups: Index | MultiIndex\n    if not levels:\n        grouper = []\n        groups = pd.Index([])\n    elif len(levels) > 1:\n        grouper = list(levels)\n        groups = pd.MultiIndex.from_product(levels.values(), names=grouper)\n    else:\n        grouper, = list(levels)\n        groups = pd.Index(levels[grouper], name=grouper)\n    return (grouper, groups)",
    "docstring": "Return index with Cartesian product of ordered grouping variable levels.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\groupby.py",
    "ast_data": "FunctionDef name:_get_groups arg:self arg:data arguments arg arg Assign For Call If Compare If Compare Assign Call Assign If Assign Assign Call If Compare Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
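The multi-variable branch builds the Cartesian product of the ordered levels; a minimal sketch of just that step with hypothetical level values:

```python
import pandas as pd

levels = {"hue": ["a", "b"], "size": [1, 2, 3]}
groups = pd.MultiIndex.from_product(levels.values(), names=list(levels))
print(groups.names)  # ['hue', 'size']
print(len(groups))   # 6 -- every (hue, size) combination
```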
  {
    "library": "tensorflow",
    "name": "_should_skip_first_node",
    "source_code": "def _should_skip_first_node(layer):\n    if layer._self_tracked_trackables:\n        return isinstance(layer, Functional) and isinstance(layer._self_tracked_trackables[0], input_layer_module.InputLayer)\n    else:\n        return isinstance(layer, Functional)",
    "docstring": "Returns True if the first layer node should not be saved or loaded.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_should_skip_first_node arg:layer arguments arg If Return return:yes BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "set_values",
    "source_code": "def set_values(self, values: ArrayLike) -> None:\n    self.blocks[0].values = values\n    self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))",
    "docstring": "Set the values of the single block in place. Use at your own risk! This does not check if the passed values are valid for the current Block/SingleBlockManager (length, dtype, etc), and this does not properly keep track of references.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:set_values arg:self arg:values arguments arg arg Assign Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_multi_take_opportunity",
    "source_code": "def _multi_take_opportunity(self, tup: tuple) -> bool:\n    if not all((is_list_like_indexer(x) for x in tup)):\n        return False\n    return not any((com.is_bool_indexer(x) for x in tup))",
    "docstring": "Check whether there is the possibility to use `_multi_take`.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_multi_take_opportunity arg:self arg:tup arguments arg arg If Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self) -> DtypeObj:\n    return self._mgr.dtype",
    "docstring": "Return the dtype object of the underlying data. See Also -------- Series.dtypes : Return the dtype object of the underlying data. Series.astype : Cast a pandas object to a specified dtype dtype. Series.convert_dtypes : Convert columns to the best possible dtypes using dtypes supporting pd.NA. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cache_method",
    "source_code": "def cache_method(f: Callable[Concatenate[_C, _P], _T]) -> Callable[Concatenate[_C, _P], _T]:\n    cache_name = '_cache_method_' + f.__name__\n\n    @functools.wraps(f)\n    def wrap(self: _C, *args: _P.args, **kwargs: _P.kwargs) -> _T:\n        assert not kwargs\n        if not (cache := getattr(self, cache_name, None)):\n            cache = {}\n            setattr(self, cache_name, cache)\n        cached_value = cache.get(args, _cache_sentinel)\n        if cached_value is not _cache_sentinel:\n            return cached_value\n        value = f(self, *args, **kwargs)\n        cache[args] = value\n        return value\n    return wrap",
    "docstring": "Like but for methods. (and similarly ) shouldn't be used on methods because it caches , keeping it alive forever. ignores so won't keep alive (assuming no cycles with in the parameters). Footgun warning: This decorator completely ignores self's properties so only use it when you know that self is frozen or won't change in a meaningful way (such as the wrapped function being pure).",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_functools.py",
    "ast_data": "FunctionDef name:cache_method arg:f arguments arg Assign FunctionDef name:wrap arg:self arguments arg arg arg If Call Assign Call Assign Call If Compare Return return:yes Assign Call Assign Return return:yes Call Return return:yes"
  },
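A usage sketch of cache_method: the cache lives on the instance (here as the attribute _cache_method_area) and is keyed by the positional args only:

```python
class Rect:
    def __init__(self, w: int, h: int) -> None:
        self.w, self.h = w, h
        self.calls = 0

    @cache_method
    def area(self, scale: int) -> int:
        self.calls += 1
        return self.w * self.h * scale

r = Rect(3, 4)
print(r.area(2), r.area(2))  # 24 24
print(r.calls)               # 1 -- the second call hit the per-instance cache
```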
  {
    "library": "tensorflow",
    "name": "is_flat",
    "source_code": "def is_flat(outputs):\n    if isinstance(outputs, collections_abc.Sequence):\n        for o in outputs:\n            if isinstance(o, collections_abc.Sequence) or isinstance(o, collections_abc.Mapping) or hasattr(o.__class__, '__attrs_attrs__'):\n                return False\n    if isinstance(outputs, collections_abc.Mapping):\n        return False\n    if hasattr(outputs.__class__, '__attrs_attrs__'):\n        return False\n    return True",
    "docstring": "Checks if outputs is a flat structure. Following structures and values are considered flat: 1) None 2) A single object 3) A list or tuple of Tensors/Operations The only structures that this function understands are sequences, dictionaries and types defined using the attrs library. E.g. this means that if outputs contains a single user-defined Object, it is considered to be flat. Errors are raised later on if that Object cannot be converted to a Tensor. Args: outputs: Output from inside . Returns: A boolean indicates whether outputs is flat.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "FunctionDef name:is_flat arg:outputs arguments arg If Call For If BoolOp Call Call Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:yes"
  },
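A plain-Python sketch of what counts as flat, using ints in place of Tensors and assuming is_flat is in scope:

```python
print(is_flat(None))        # True  -- a single value
print(is_flat(3))           # True  -- a single object
print(is_flat([1, 2]))      # True  -- a flat sequence
print(is_flat([[1], 2]))    # False -- nested sequence
print(is_flat({"out": 1}))  # False -- mappings are never flat
```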
  {
    "library": "pytorch",
    "name": "PerBlock",
    "source_code": "@dataclass(frozen=True)\nclass PerBlock(Granularity):\n    block_size: tuple[int, ...]",
    "docstring": "Represents per-block granularity in quantization. See :func: for docs for Attributes: block_size (Tuple[int, ...]): The size of each quantization group",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:PerBlock Call"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_keyword",
    "source_code": "class desc_sig_keyword(desc_sig_element, _sig_element=True):\n    classes = ['k']",
    "docstring": "Node for a general keyword in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_keyword Assign"
  },
  {
    "library": "pytorch",
    "name": "extract_target",
    "source_code": "def extract_target(node: torch.fx.Node) -> torch.fx.node.Target:\n    if node.op == 'call_module':\n        assert isinstance(node.target, str)\n        return _get_attr(node.graph.owning_module, node.target).__class__\n    return node.target",
    "docstring": "For call_function and call_method, we directly use the target function; For call_module, the target is string, and we treat the module class as a function.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "FunctionDef name:extract_target arg:node arguments arg If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "print_tensor",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef print_tensor(x, message='', summarize=3):\n    if isinstance(x, tensor_lib.Tensor) and hasattr(x, 'graph'):\n        with get_graph().as_default():\n            op = logging_ops.print_v2(message, x, output_stream=sys.stdout, summarize=summarize)\n            with ops.control_dependencies([op]):\n                return array_ops.identity(x)\n    else:\n        logging_ops.print_v2(message, x, output_stream=sys.stdout, summarize=summarize)\n        return x",
    "docstring": "Prints and the tensor value when evaluated. Note that returns a new tensor identical to which should be used in the following code. Otherwise the print operation is not taken into account during evaluation. Example: >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> _ = tf.keras.backend.print_tensor(x) [[1 2] [3 4]] Args: x: Tensor to print. message: Message to print jointly with the tensor. summarize: The first and last elements within each dimension are recursively printed per Tensor. If None, then the first 3 and last 3 elements of each dimension are printed for each tensor. If set to -1, it will print all elements of every tensor. Returns: The same tensor , unchanged.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:print_tensor arg:x arg:message arg:summarize arguments arg arg arg If BoolOp Call Call With Call Call Assign Call With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "F",
    "source_code": "class F(Constraint):\n\n    def __init__(self) -> None:\n        pass\n\n    def __eq__(self, other):\n        return isinstance(other, F)\n\n    def __repr__(self):\n        return 'False'",
    "docstring": "False",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:F FunctionDef name:__init__ arg:self arguments arg FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_HookTimer",
    "source_code": "class _HookTimer:\n\n    def __init__(self):\n        pass\n\n    def reset(self):\n        pass\n\n    def should_trigger_for_step(self, step):\n        raise NotImplementedError\n\n    def update_last_triggered_step(self, step):\n        raise NotImplementedError\n\n    def last_triggered_step(self):\n        raise NotImplementedError",
    "docstring": "Base timer for determining when Hooks should trigger. Should not be instantiated directly.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "ClassDef name:_HookTimer FunctionDef name:__init__ arg:self arguments arg FunctionDef name:reset arg:self arguments arg FunctionDef name:should_trigger_for_step arg:self arg:step arguments arg arg Raise FunctionDef name:update_last_triggered_step arg:self arg:step arguments arg arg Raise FunctionDef name:last_triggered_step arg:self arguments arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "latent_mean_and_variance",
    "source_code": "def latent_mean_and_variance(self, X):\n    check_is_fitted(self)\n    K_star = self.kernel_(self.X_train_, X)\n    latent_mean = K_star.T.dot(self.y_train_ - self.pi_)\n    v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star)\n    latent_var = self.kernel_.diag(X) - np.einsum('ij,ij->j', v, v)\n    return (latent_mean, latent_var)",
    "docstring": "Compute the mean and variance of the latent function values. Based on algorithm 3.2 of [RW2006]_, this function returns the latent mean (Line 4) and variance (Line 6) of the Gaussian process classification model. Note that this function is only supported for binary classification. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- latent_mean : array-like of shape (n_samples,) Mean of the latent function values at the query points. latent_var : array-like of shape (n_samples,) Variance of the latent function values at the query points.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:latent_mean_and_variance arg:self arg:X arguments arg arg Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_func_name",
    "source_code": "def get_func_name(func):\n    _, func = tf_decorator.unwrap(func)\n    if callable(func):\n        if tf_inspect.isfunction(func):\n            return func.__name__\n        elif tf_inspect.ismethod(func):\n            return '%s.%s' % (func.__self__.__class__.__name__, func.__func__.__name__)\n        else:\n            return str(type(func))\n    else:\n        raise ValueError(f'Argument `func` must be a callable. Received func={func} (of type {type(func)})')",
    "docstring": "Returns name of passed callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\function_utils.py",
    "ast_data": "FunctionDef name:get_func_name arg:func arguments arg Assign Call If Call If Call Return return:yes If Call Return return:yes Return return:yes Call Call Raise Call Call"
  },
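A usage sketch covering the three branches, assuming a TensorFlow environment where get_func_name is importable:

```python
def plain():
    pass

class Greeter:
    def hello(self):
        pass

print(get_func_name(plain))            # plain
print(get_func_name(Greeter().hello))  # Greeter.hello
print(get_func_name(len))              # <class 'builtin_function_or_method'>
```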
  {
    "library": "tensorflow",
    "name": "_maybe_add_main_op",
    "source_code": "def _maybe_add_main_op(self, main_op):\n    if main_op is None:\n        return\n    if not isinstance(main_op, ops.Operation):\n        raise TypeError(f'Expected {main_op} to be an Operation but got type {type(main_op)} instead.')\n    for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY):\n        if ops.get_collection(init_op_key):\n            raise ValueError(f'Graph already contains one or more main ops under the collection {init_op_key}.')\n    ops.add_to_collection(constants.MAIN_OP_KEY, main_op)",
    "docstring": "Adds main op to the SavedModel. Args: main_op: Main op to run as part of graph initialization. If None, no main op will be added to the graph. Raises: TypeError: If the main op is provided but is not of type . ValueError: if the Graph already contains an init op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py",
    "ast_data": "FunctionDef name:_maybe_add_main_op arg:self arg:main_op arguments arg arg If Compare Return return:no If Call Raise Call Call For If Call Raise Call Call"
  },
  {
    "library": "django",
    "name": "simple_tag",
    "source_code": "def simple_tag(self, func=None, takes_context=None, name=None):\n\n    def dec(func):\n        params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func))\n        function_name = name or func.__name__\n\n        @wraps(func)\n        def compile_func(parser, token):\n            bits = token.split_contents()[1:]\n            target_var = None\n            if len(bits) >= 2 and bits[-2] == 'as':\n                target_var = bits[-1]\n                bits = bits[:-2]\n            args, kwargs = parse_bits(parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, function_name)\n            return SimpleNode(func, takes_context, args, kwargs, target_var)\n        self.tag(function_name, compile_func)\n        return func\n    if func is None:\n        return dec\n    elif callable(func):\n        return dec(func)\n    else:\n        raise ValueError('Invalid arguments provided to simple_tag')",
    "docstring": "Register a callable as a compiled template tag. Example: @register.simple_tag def hello(*args, **kwargs): return 'world'",
    "type": "method",
    "file_path": "django\\django\\template\\library.py",
    "ast_data": "FunctionDef name:simple_tag arg:self arg:func arg:takes_context arg:name arguments arg arg arg arg FunctionDef name:dec arg:func arguments arg Assign Call Call Assign BoolOp FunctionDef name:compile_func arg:parser arg:token arguments arg arg Assign Call Assign If BoolOp Compare Call Compare Assign Assign Assign Call Return return:yes Call Call Call Return return:yes If Compare Return return:yes If Call Return return:yes Call Raise Call"
  },
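`compile_func` above strips a trailing `as <var>` pair from the token bits, which is what enables assignment syntax in templates. A hedged usage sketch with Django's public API:

```python
# Registering a simple_tag and using the "... as var" form parsed above.
from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def greet(context, name):
    return f"Hello, {name}!"

# In a template (illustrative):
#   {% greet "world" as greeting %}
#   {{ greeting }}
```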
  {
    "library": "scipy",
    "name": "_getcol",
    "source_code": "def _getcol(self, i):\n    M, N = self.shape\n    i = int(i)\n    if i < -N or i >= N:\n        raise IndexError(f'index ({i}) out of range')\n    if i < 0:\n        i += N\n    return self._get_sliceXint(slice(None), i)",
    "docstring": "Return a copy of column i of the matrix, as a (m x 1) column vector.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_index.py",
    "ast_data": "FunctionDef name:_getcol arg:self arg:i arguments arg arg Assign Assign Call If BoolOp Compare Compare Raise Call If Compare Return return:yes Call Call"
  },
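`_getcol` normalizes negative indices with `i += N` before delegating to a slice-based getter; the public indexing path exercises the same code. For example:

```python
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.arange(12).reshape(3, 4))
col = A[:, -1]                 # same column as index 3 after wraparound
print(col.toarray().ravel())   # [ 3  7 11]
```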
  {
    "library": "sphinx",
    "name": "add_index_to_domain",
    "source_code": "def add_index_to_domain(self, domain: str, index: type[Index], _override: bool=False) -> None:\n    self.registry.add_index_to_domain(domain, index)",
    "docstring": "Register a custom index for a domain. Add a custom *index* class to the domain named *domain*. :param domain: The name of the target domain :param index: The index class :param override: If false, do not install it if another index is already installed as the same name If true, unconditionally install the index. .. versionadded:: 1.0 .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_index_to_domain arg:self arg:domain arg:index arg:_override arguments arg arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "Mishra11",
    "source_code": "class Mishra11(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-3, 3), (-3, 3)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        N = self.N\n        return (1.0 / N * sum(abs(x)) - prod(abs(x)) ** 1.0 / N) ** 2.0",
    "docstring": "Mishra 11 objective function. This class defines the Mishra 11 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Mishra11}}(x) = \\left [ \\frac{1}{n} \\sum_{i=1}^{n} \\lvert x_i \\rvert - \\left(\\prod_{i=1}^{n} \\lvert x_i \\rvert \\right )^{\\frac{1}{n}} \\right]^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py",
    "ast_data": "ClassDef name:Mishra11 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_unscale_parameters",
    "source_code": "def _unscale_parameters(self, parameters):\n    return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5",
    "docstring": "Scale from parameters to a number between 0 and 1.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_unscale_parameters arg:self arg:parameters arguments arg arg Return return:yes"
  },
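A numeric sketch of the affine map above. Assuming `differential_evolution`'s conventions (the first scale argument is the midpoint of the bounds and the second their width, which is how the private attributes are built), the bounds map exactly to 0 and 1:

```python
# scale_arg1 / recip_scale_arg2 mirror the private attributes (assumed).
lb, ub = -5.0, 15.0
scale_arg1 = 0.5 * (lb + ub)
recip_scale_arg2 = 1.0 / abs(ub - lb)

def unscale(p):
    return (p - scale_arg1) * recip_scale_arg2 + 0.5

print(unscale(lb), unscale(ub))  # 0.0 1.0
```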
  {
    "library": "tensorflow",
    "name": "_ReshapeToInput",
    "source_code": "def _ReshapeToInput(op: ops.Operation, grad):\n    return array_ops.reshape(_IndexedSlicesToTensorNoWarning(grad), array_ops.shape(op.inputs[0]))",
    "docstring": "Reshapes the gradient to the shape of the original input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_ReshapeToInput arg:op arg:grad arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "within_grid",
    "source_code": "def within_grid(self, xi, yi):\n    return 0 <= xi <= self.nx - 1 and 0 <= yi <= self.ny - 1",
    "docstring": "Return whether (*xi*, *yi*) is a valid index of the grid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py",
    "ast_data": "FunctionDef name:within_grid arg:self arg:xi arg:yi arguments arg arg arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pytorch",
    "name": "collect_callgrind",
    "source_code": "def collect_callgrind(self, task_spec: common.TaskSpec, globals: dict[str, Any], *, number: int, repeats: int, collect_baseline: bool, is_python: bool, retain_out_file: bool) -> tuple[CallgrindStats, ...]:\n    self._validate()\n    assert is_python or not collect_baseline\n    *task_stats, baseline_stats = self._invoke(task_spec=task_spec, globals=globals, number=number, repeats=repeats, collect_baseline=collect_baseline, is_python=is_python, retain_out_file=retain_out_file)\n    assert len(task_stats) == repeats\n    return tuple((CallgrindStats(task_spec=task_spec, number_per_run=number, built_with_debug_symbols=self._build_type == 'RelWithDebInfo', baseline_inclusive_stats=baseline_stats[0], baseline_exclusive_stats=baseline_stats[1], stmt_inclusive_stats=stmt_inclusive_stats, stmt_exclusive_stats=stmt_exclusive_stats, stmt_callgrind_out=out_contents) for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats))",
    "docstring": "Collect stats, and attach a reference run which can be used to filter interpreter overhead.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "FunctionDef name:collect_callgrind arg:self arg:task_spec arg:globals arguments arg arg arg arg arg arg arg arg Call BoolOp Assign Call Compare Call Return return:yes Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "task_type",
    "source_code": "@property\ndef task_type(self):\n    return self._task_type",
    "docstring": "Returns the role of the corresponding task.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:task_type arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, base=None, offset=None):\n    if base is not None:\n        self._base = base\n    if offset is not None:\n        self.offset = offset",
    "docstring": "Set parameters within this locator",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arg:base arg:offset arguments arg arg arg If Compare Assign If Compare Assign"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return self.size",
    "docstring": "Return the number of points in the coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_add",
    "source_code": "@tf_export(v1=['scatter_add'])\ndef scatter_add(ref, indices, updates, use_locking=False, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.scatter_add(ref, indices, updates, use_locking=use_locking, name=name)\n    return ref._lazy_read(gen_resource_variable_ops.resource_scatter_add(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))",
    "docstring": "Adds sparse updates to the variable referenced by . This operation computes This operation outputs after the update is done. This makes it easier to chain operations that need to use the updated value. Duplicate entries are handled correctly: if multiple reference the same location, their contributions add. Requires . Args: ref: A . indices: A . Must be one of the following types: , . A tensor of indices into the first dimension of . updates: A . Must have the same type as . A tensor of updated values to store in . use_locking: An optional . Defaults to . If True, the assignment will be protected by a lock; otherwise the behavior is undefined, but may exhibit less contention. name: A name for the operation (optional). Returns: Same as . Returned as a convenience for operations that want to use the updated values after the update is done.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:scatter_add arg:ref arg:indices arg:updates arg:use_locking arg:name arguments arg arg arg arg arg If Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "has_xy_data",
    "source_code": "@property\ndef has_xy_data(self):\n    return bool({'x', 'y'} & set(self.variables))",
    "docstring": "Return True at least one of x or y is defined.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:has_xy_data arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "remove_none",
    "source_code": "def remove_none(d: dict[str, str | None]) -> None:\n    for k, v in list(d.items()):\n        if v is None:\n            del d[k]\n        elif isinstance(v, dict):\n            remove_none(v)\n            if not v:\n                del d[k]",
    "docstring": "Remove key where value is None, through nested dicts",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\excel.py",
    "ast_data": "FunctionDef name:remove_none arg:d arguments arg For Call Call If Compare If Call Call If"
  },
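`remove_none` prunes `None` values and any dicts that become empty as a result. A standalone run of the same logic:

```python
def remove_none(d):
    for k, v in list(d.items()):
        if v is None:
            del d[k]
        elif isinstance(v, dict):
            remove_none(v)
            if not v:          # subtree emptied out: drop it too
                del d[k]

style = {"font": {"bold": None}, "border": {"top": "thin"}, "fill": None}
remove_none(style)
print(style)  # {'border': {'top': 'thin'}}
```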
  {
    "library": "pytorch",
    "name": "run_a",
    "source_code": "def run_a(self, mod: torch.fx.GraphModule, inputs: Tensors, report_idx: int=-1) -> TensorOrTensors:\n    raise RuntimeError('run_a() is not implemented.')",
    "docstring": "Run with and generate output. The output will be compared with output of run_b().",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:run_a arg:self arg:mod arg:inputs arg:report_idx arguments arg arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "distinct",
    "source_code": "def distinct(self, *field_names):\n    self._not_support_combined_queries('distinct')\n    if self.query.is_sliced:\n        raise TypeError('Cannot create distinct fields once a slice has been taken.')\n    obj = self._chain()\n    obj.query.add_distinct_fields(*field_names)\n    return obj",
    "docstring": "Return a new QuerySet instance that will select only distinct results.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:distinct arg:self arguments arg arg Call If Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "write_model",
    "source_code": "def write_model(model_object, output_tflite_file):\n    if sys.byteorder == 'big':\n        model_object = copy.deepcopy(model_object)\n        byte_swap_tflite_model_obj(model_object, 'big', 'little')\n    model_bytearray = convert_object_to_bytearray(model_object)\n    with gfile.GFile(output_tflite_file, 'wb') as output_file_handle:\n        output_file_handle.write(model_bytearray)",
    "docstring": "Writes the tflite model, a python object, into the output file. NOTE: This API only works for TFLite generated with _experimental_use_buffer_offset=false Args: model_object: A tflite model as a python object output_tflite_file: Full path name to the output tflite file. Raises: IOError: If output_tflite_file path is invalid or cannot be opened.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:write_model arg:model_object arg:output_tflite_file arguments arg arg If Compare Assign Call Call Assign Call With Call Call"
  },
  {
    "library": "tensorflow",
    "name": "device_name_to_device_path",
    "source_code": "def device_name_to_device_path(device_name):\n    device_name_items = compat.as_text(device_name).split('/')\n    device_name_items = [item.replace(':', '_') for item in device_name_items]\n    return METADATA_FILE_PREFIX + DEVICE_TAG + ','.join(device_name_items)",
    "docstring": "Convert device name to device path.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:device_name_to_device_path arg:device_name arguments arg Assign Call Call Assign Call Return return:yes Call"
  },
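A pure-string sketch of the transform; the `METADATA_FILE_PREFIX` and `DEVICE_TAG` values below are assumptions based on the tfdbg naming scheme, not verified constants:

```python
METADATA_FILE_PREFIX = "_tfdbg_"   # assumed value
DEVICE_TAG = "device_"             # assumed value

def device_name_to_device_path(device_name):
    items = device_name.split("/")
    items = [item.replace(":", "_") for item in items]
    return METADATA_FILE_PREFIX + DEVICE_TAG + ",".join(items)

print(device_name_to_device_path("/job:worker/replica:0/task:1/gpu:0"))
# _tfdbg_device_,job_worker,replica_0,task_1,gpu_0
```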
  {
    "library": "tensorflow",
    "name": "minimum",
    "source_code": "def minimum(inputs, **kwargs):\n    return Minimum(**kwargs)(inputs)",
    "docstring": "Functional interface to the layer. Args: inputs: A list of input tensors (at least 2). **kwargs: Standard layer keyword arguments. Returns: A tensor, the element-wise minimum of the inputs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\merge.py",
    "ast_data": "FunctionDef name:minimum arg:inputs arguments arg arg Return return:yes Call Call"
  },
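The same element-wise reduction is reachable through the public Keras API mirror of this helper; a small sketch:

```python
import tensorflow as tf

a = tf.constant([[1.0, 5.0]])
b = tf.constant([[3.0, 2.0]])
print(tf.keras.layers.minimum([a, b]).numpy())  # [[1. 2.]]
```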
  {
    "library": "tensorflow",
    "name": "tensor_float_32_execution_enabled",
    "source_code": "@tf_export('config.experimental.tensor_float_32_execution_enabled')\ndef tensor_float_32_execution_enabled():\n    return _pywrap_tensor_float_32_execution.is_enabled()",
    "docstring": "Returns whether TensorFloat-32 is enabled. By default, TensorFloat-32 is enabled, but this can be changed with . Returns: True if TensorFloat-32 is enabled (the default) and False otherwise",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:tensor_float_32_execution_enabled arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_sync_params_and_buffers",
    "source_code": "def _sync_params_and_buffers(process_group: dist.ProcessGroup, module_states: list[torch.Tensor], broadcast_bucket_size: int, src: int) -> None:\n    if len(module_states) > 0:\n        dist._broadcast_coalesced(process_group, module_states, broadcast_bucket_size, src)",
    "docstring": "Synchronize `` (list of tensors) across all processes by broadcasting them from rank 0.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\utils.py",
    "ast_data": "FunctionDef name:_sync_params_and_buffers arg:process_group arg:module_states arg:broadcast_bucket_size arg:src arguments arg arg arg arg If Compare Call Call"
  },
  {
    "library": "pandas",
    "name": "_data",
    "source_code": "@cache_readonly\ndef _data(self) -> np.ndarray:\n    return np.arange(self.start, self.stop, self.step, dtype=np.int64)",
    "docstring": "An int array that for performance reasons is created only when needed. The constructed array is saved in ``.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:_data arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_Square",
    "source_code": "class _Square(Constraint):\n    event_dim = 2\n\n    def check(self, value):\n        return torch.full(size=value.shape[:-2], fill_value=value.shape[-2] == value.shape[-1], dtype=torch.bool, device=value.device)",
    "docstring": "Constrain to square matrices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_Square Assign FunctionDef name:check arg:self arg:value arguments arg arg Return return:yes Call Compare"
  },
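`torch.distributions.constraints` exposes a `square` singleton instance of this class; `check()` collapses the last two dims into one boolean per batch element:

```python
import torch
from torch.distributions import constraints

print(constraints.square.check(torch.randn(5, 3, 3)))  # shape (5,), all True
print(constraints.square.check(torch.randn(2, 3, 4)))  # shape (2,), all False
```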
  {
    "library": "authlib",
    "name": "create_endpoint_response",
    "source_code": "def create_endpoint_response(self, request):\n    client = self.authenticate_endpoint_client(request)\n    token = self.authenticate_token(request, client)\n    if token:\n        self.revoke_token(token, request)\n        self.server.send_signal('after_revoke_token', token=token, client=client)\n    return (200, {}, default_json_headers)",
    "docstring": "Validate revocation request and create the response for revocation. For example, a client may request the revocation of a refresh token with the following request:: POST /revoke HTTP/1.1 Host: server.example.com Content-Type: application/x-www-form-urlencoded Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token :returns: (status_code, body, headers)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py",
    "ast_data": "FunctionDef name:create_endpoint_response arg:self arg:request arguments arg arg Assign Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "nodes_first",
    "source_code": "def nodes_first(nodes: list[torch.fx.Node], node_call_back=None) -> Optional[torch.fx.Node]:\n    ret = nodes_filter(nodes, node_call_back if node_call_back else lambda node: True)\n    if len(ret) > 0:\n        return ret[0]\n    return None",
    "docstring": "Returns the first node that matches the node_call_back. If no node matches, returns None. When node_call_back is None, returns the first node in the node list.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:nodes_first arg:nodes arg:node_call_back arguments arg arg Assign Call arguments arg If Compare Call Return return:yes Return return:no"
  },
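`nodes_first` is equivalent to taking the first node satisfying a predicate. A standalone sketch (`first_node` is an illustrative stand-in, not the PyTorch helper) over a traced FX graph:

```python
import torch
import torch.fx

def first_node(nodes, predicate=None):
    predicate = predicate or (lambda node: True)
    return next((n for n in nodes if predicate(n)), None)

def f(x):
    return torch.relu(x) + 1

gm = torch.fx.symbolic_trace(f)
nodes = list(gm.graph.nodes)
print(first_node(nodes))                                     # the "x" placeholder
print(first_node(nodes, lambda n: n.op == "call_function"))  # the relu node
```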
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, context_id):\n    dist_autograd._is_valid_context(context_id)\n    optimizer_step_func = _script_local_optimizer_step if self.is_functional_optim else _local_optimizer_step\n    rpc_futs = [rpc.rpc_async(optimizer.owner(), optimizer_step_func, args=(optimizer, context_id)) for optimizer in self.remote_optimizers]\n    _wait_for_all(rpc_futs)",
    "docstring": "Performs a single optimization step. This will call :meth: on each worker containing parameters to be optimized, and will block until all workers return. The provided `~torch.distributed.autograd.context` that contains the gradients that should be applied to the parameters. Args: context_id: the autograd context id for which we should run the optimizer step.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:step arg:self arg:context_id arguments arg arg Call Assign Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "_parse_policy_werror",
    "source_code": "def _parse_policy_werror(self, has_baseline, final_targets, extra_flags):\n    flags = self.cc_flags['werror']\n    if not flags:\n        self.dist_log(\"current compiler doesn't support werror flags, warnings will 'not' treated as errors\", stderr=True)\n    else:\n        self.dist_log('compiler warnings are treated as errors')\n        extra_flags += flags\n    return (has_baseline, final_targets, extra_flags)",
    "docstring": "force warnings to treated as errors",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:_parse_policy_werror arg:self arg:has_baseline arg:final_targets arg:extra_flags arguments arg arg arg arg Assign If Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_config",
    "source_code": "@classmethod\ndef from_config(cls, config):\n    return cls(**config)",
    "docstring": "Creates the LossScale from its config.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py",
    "ast_data": "FunctionDef name:from_config arg:cls arg:config arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "AppRegistryNotReady",
    "source_code": "class AppRegistryNotReady(Exception):\n    pass",
    "docstring": "The django.apps registry is not populated yet",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:AppRegistryNotReady"
  },
  {
    "library": "tensorflow",
    "name": "on_test_begin",
    "source_code": "def on_test_begin(self, logs=None):\n    logs = self._process_logs(logs)\n    for callback in self.callbacks:\n        callback.on_test_begin(logs)",
    "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_begin arg:self arg:logs arguments arg arg Assign Call For Call"
  },
  {
    "library": "kornia",
    "name": "identity",
    "source_code": "@classmethod\ndef identity(cls, batch_size: Optional[int]=None, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> So2:\n    real_data = tensor(1.0, device=device, dtype=dtype)\n    imag_data = tensor(0.0, device=device, dtype=dtype)\n    if batch_size is not None:\n        KORNIA_CHECK(batch_size >= 1, msg='batch_size must be positive')\n        real_data = real_data.repeat(batch_size)\n        imag_data = imag_data.repeat(batch_size)\n    return cls(complex(real_data, imag_data))",
    "docstring": "Create a So2 group representing an identity rotation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = So2.identity(batch_size=2) >>> s Parameter containing: tensor([1.+0.j, 1.+0.j], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:identity arg:cls arg:batch_size arg:device arg:dtype arguments arg arg arg arg Assign Call Assign Call If Compare Call Compare Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    config = dict(zip(self._fields, self))\n    config['dtype'] = self.dtype.name\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "adapt_ipaddressfield_value",
    "source_code": "def adapt_ipaddressfield_value(self, value):\n    return value or None",
    "docstring": "Transform a string representation of an IP address into the expected type for the backend driver.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:adapt_ipaddressfield_value arg:self arg:value arguments arg arg Return return:yes BoolOp"
  },
  {
    "library": "pytorch",
    "name": "_clear_state",
    "source_code": "def _clear_state(self) -> None:\n    self._operator_names.clear()\n    self.memories_allocated.clear()\n    self.memories_active.clear()\n    self.memories_reserved.clear()\n    self._markers.clear()\n    self._cur_module_name = ''\n    self._op_index = 0\n    self._num_cuda_retries = 0",
    "docstring": "Clear states when start_monitor() is called.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\memory_tracker.py",
    "ast_data": "FunctionDef name:_clear_state arg:self arguments arg Call Call Call Call Call Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "point_line_distance",
    "source_code": "def point_line_distance(point: Tensor, line: Tensor, eps: float=1e-09) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(point)\n    KORNIA_CHECK_IS_TENSOR(line)\n    if point.shape[-1] not in (2, 3):\n        raise ValueError(f'pts must be a (*, 2 or 3) tensor. Got {point.shape}')\n    if line.shape[-1] != 3:\n        raise ValueError(f'lines must be a (*, 3) tensor. Got {line.shape}')\n    numerator = line[..., 0] * point[..., 0]\n    numerator += line[..., 1] * point[..., 1]\n    numerator += line[..., 2]\n    numerator.abs_()\n    denom_norm = (line[..., 0].square() + line[..., 1].square()).sqrt()\n    return numerator / (denom_norm + eps)",
    "docstring": "Return the distance from points to lines. Args: point: (possibly homogeneous) points :math:. line: lines coefficients :math: with shape :math:, where :math:. eps: Small constant for safe sqrt. Returns: the computed distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\linalg.py",
    "ast_data": "FunctionDef name:point_line_distance arg:point arg:line arg:eps arguments arg arg arg Call Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Call Call Return return:yes"
  },
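A quick numeric check: the distance from the origin to the line x + y - 2 = 0 is 2/sqrt(2) ≈ 1.4142. The third point coordinate plays the role of the homogeneous w, which the implementation assumes to be 1:

```python
import torch
from kornia.geometry.linalg import point_line_distance

point = torch.tensor([0.0, 0.0, 1.0])    # homogeneous point (w = 1)
line = torch.tensor([1.0, 1.0, -2.0])    # coefficients of ax + by + c = 0
print(point_line_distance(point, line))  # ~1.4142
```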
  {
    "library": "tensorflow",
    "name": "from_keras_model_file",
    "source_code": "@classmethod\ndef from_keras_model_file(cls, model_file, input_arrays=None, input_shapes=None, output_arrays=None, custom_objects=None):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.KERAS_MODEL)\n    return TFLiteKerasModelConverter(model_file, input_arrays, input_shapes, output_arrays, custom_objects)",
    "docstring": "Creates a TFLiteConverter class from a tf.keras model file. Args: model_file: Full filepath of HDF5 file containing the tf.keras model. input_arrays: List of input tensors to freeze graph with. Uses input arrays from SignatureDef when none are provided. (default None) input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {\"foo\" : None}). (default None) output_arrays: List of output tensors to freeze graph with. Uses output arrays from SignatureDef when none are provided. (default None) custom_objects: Dict mapping names (strings) to custom classes or functions to be considered during model deserialization. (default None) Returns: TFLiteConverter class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_keras_model_file arg:cls arg:model_file arg:input_arrays arg:input_shapes arg:output_arrays arg:custom_objects arguments arg arg arg arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "hop",
    "source_code": "@property\ndef hop(self) -> int:\n    return self._hop",
    "docstring": "Time increment in signal samples for sliding window. This attribute is read only, since depends on it. See Also -------- delta_t: Time increment of STFT (`winwinm_num`. T: Sampling interval of input signal and of the window. win: Window function as real- or complex-valued 1d array. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:hop arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "masked_fill",
    "source_code": "@_onnx_symbolic('aten::masked_fill')\ndef masked_fill(g: jit_utils.GraphContext, self, mask, value):\n    mask = g.op('Cast', mask, to_i=_C_onnx.TensorProtoDataType.BOOL)\n    value = symbolic_helper._maybe_get_scalar(value)\n    return g.op('Where', mask, symbolic_helper._if_scalar_type_as(value, self), self)",
    "docstring": "Implement the masked_fill functionality available for a pytorch tensor in ONNX. Fills elements of the input tensor with where is True.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:masked_fill arg:g arg:self arg:mask arg:value arguments arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, keys):\n    return self.lookup(keys)",
    "docstring": "Looks up in a table, outputs the corresponding values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:keys arguments arg arg Return return:yes Call"
  },
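Bracket indexing on a lookup table delegates to `lookup()`; a sketch with a static hash table, assuming the TF2 eager API:

```python
import tensorflow as tf

table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(["a", "b"], [1, 2]),
    default_value=-1,
)
print(table[tf.constant(["b", "z"])].numpy())  # [ 2 -1]
```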
  {
    "library": "django",
    "name": "CsInt",
    "source_code": "class CsInt(GEOSFuncFactory):\n    argtypes = [CS_PTR, POINTER(c_uint)]\n    restype = c_int\n    errcheck = staticmethod(check_cs_get)",
    "docstring": "For coordinate sequence routines that return an integer.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\coordseq.py",
    "ast_data": "ClassDef name:CsInt Assign Call Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "from_range",
    "source_code": "@classmethod\ndef from_range(cls, data: range, name=None, dtype: Dtype | None=None) -> Self:\n    if not isinstance(data, range):\n        raise TypeError(f'{cls.__name__}(...) must be called with object coercible to a range, {data!r} was passed')\n    cls._validate_dtype(dtype)\n    return cls._simple_new(data, name=name)",
    "docstring": "Create :class: from a `pandas.RangeIndexRangeIndex` object. It is particularly useful for constructing indices in an efficient and memory-friendly manner. Parameters ---------- data : range The range object to be converted into a RangeIndex. name : str, default None Name to be stored in the index. dtype : Dtype or None Data type for the RangeIndex. If None, the default integer type will be used. Returns ------- RangeIndex See Also -------- RangeIndex : Immutable Index implementing a monotonic integer range. Index : Immutable sequence used for indexing and alignment. Examples -------- >>> pd.RangeIndex.from_range(range(5)) RangeIndex(start=0, stop=5, step=1) >>> pd.RangeIndex.from_range(range(2, -10, -3)) RangeIndex(start=2, stop=-10, step=-3)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:from_range arg:cls arg:data arg:name arg:dtype arguments arg arg arg arg If Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_mark_dynamic",
    "source_code": "@forbid_in_graph\ndef maybe_mark_dynamic(t, index):\n    if is_traceable_wrapper_subclass(t):\n        _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)\n    if isinstance(index, int):\n        if not hasattr(t, '_dynamo_weak_dynamic_indices'):\n            t._dynamo_weak_dynamic_indices = set()\n        t._dynamo_weak_dynamic_indices.add(index)\n        return\n    assert isinstance(index, (list, tuple))\n    for i in index:\n        maybe_mark_dynamic(t, i)",
    "docstring": "Mark a tensor as having a dynamic dim, but don't enforce it (i.e., if this dimension ends up getting specialized, don't error).",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:maybe_mark_dynamic arg:t arg:index arguments arg arg If Call Call If Call If Call Assign Call Call Return return:no Call For Call"
  },
  {
    "library": "pytorch",
    "name": "_handle_max_norm_col_wise",
    "source_code": "def _handle_max_norm_col_wise(max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg):\n    norm_type = norm_type if norm_type is not None else 2.0\n    unique_inp = torch.unique(torch.cat(gathered_inputs))\n    local_shard_sum = torch.sum(torch.pow(torch.abs(local_shard), norm_type), dim=1, dtype=local_shard.dtype)\n    local_shard_sum = all_reduce(local_shard_sum, group=pg)\n    local_shard_norm = torch.pow(local_shard_sum, 1.0 / norm_type)\n    max_norm_tensor = torch.full((local_shard.size(0),), float('inf'), dtype=local_shard.dtype, device=input.device)\n    max_norm_tensor[unique_inp] = max_norm\n    local_shard_t = local_shard.t().contiguous()\n    normalized_tensor = torch.where(local_shard_norm > max_norm_tensor, max_norm_tensor, local_shard_norm)\n    local_shard_norm[local_shard_norm == 0.0] = 1.0\n    local_shard_norm_renormed = torch.div(torch.mul(local_shard_t, normalized_tensor), local_shard_norm).t().contiguous()\n    return local_shard_norm_renormed",
    "docstring": "For col-wise sharding of weight, we need to aggregate the norm across all ranks before we can perform the proper re-norm. Note that, the max_norm logic is only applied to the embedding indices that are looked up and not the whole shard. Args: max_norm: If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type: The p in the p-norm to compute for the max_norm option. local_shard: col-wise shared local weight used for lookup. input: tensor to be applied op to. world_size: number of ranks. gathered_inputs: list of inputs from all ranks. pg: process group. Return: local_shard_norm_renormed: local_shard re-normed to max_norm if the norm is larger than it.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\chunk_sharding_spec_ops\\_common.py",
    "ast_data": "FunctionDef name:_handle_max_norm_col_wise arg:max_norm arg:norm_type arg:local_shard arg:input arg:world_size arg:gathered_inputs arg:pg arguments arg arg arg arg arg arg arg Assign Compare Assign Call Call Assign Call Call Call Assign Call Assign Call Assign Call Call Call Assign Assign Call Call Assign Call Compare Assign Compare Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sync_multi",
    "source_code": "def sync_multi(tensors, devices):\n    torch._C._lazy._sync_multi(tensors, devices)",
    "docstring": "Sync the list of lazy tensors so there IR get lowered for the activate backend and the compiled computation graph get cached.",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\__init__.py",
    "ast_data": "FunctionDef name:sync_multi arg:tensors arg:devices arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(identifier):\n    if identifier is None:\n        return None\n    if isinstance(identifier, dict):\n        return deserialize(identifier)\n    elif isinstance(identifier, str):\n        identifier = str(identifier)\n        return deserialize(identifier)\n    elif callable(identifier):\n        if inspect.isclass(identifier):\n            identifier = identifier()\n        return identifier\n    else:\n        raise ValueError('Could not interpret initializer identifier: ' + str(identifier))",
    "docstring": "Retrieve a Keras initializer by the identifier. The may be the string name of a initializers function or class ( case-sensitively). >>> identifier = 'Ones' >>> tf.keras.initializers.deserialize(identifier) You can also specify of the initializer to this function by passing dict containing and as an identifier. Also note that the must map to a class. >>> cfg = {'class_name': 'Ones', 'config': {}} >>> tf.keras.initializers.deserialize(cfg) In the case that the is a class, this method will return a new instance of the class by its constructor. Args: identifier: String or dict that contains the initializer name or configurations. Returns: Initializer instance base on the input identifier. Raises: ValueError: If the input identifier is not a supported type or in a bad format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\__init__.py",
    "ast_data": "FunctionDef name:get arg:identifier arguments arg If Compare Return return:no If Call Return return:yes Call If Call Assign Call Return return:yes Call If Call If Call Assign Call Return return:yes Raise Call Call"
  },
  {
    "library": "pandas",
    "name": "closed",
    "source_code": "@property\ndef closed(self) -> IntervalClosedType:\n    return self.dtype.closed",
    "docstring": "String describing the inclusive side the intervals. Either ``. See Also -------- IntervalArray.closed : Returns inclusive side of the IntervalArray. Interval.closed : Returns inclusive side of the Interval. IntervalIndex.closed : Returns inclusive side of the IntervalIndex. Examples -------- For arrays: >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) >>> interv_arr [(0, 1], (1, 5]] Length: 2, dtype: interval[int64, right] >>> interv_arr.closed 'right' For Interval Index: >>> interv_idx = pd.interval_range(start=0, end=2) >>> interv_idx IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]') >>> interv_idx.closed 'right'",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:closed arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "weighted_categorical_column",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.weighted_categorical_column')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef weighted_categorical_column(categorical_column, weight_feature_key, dtype=dtypes.float32):\n    if dtype is None or not (dtype.is_integer or dtype.is_floating):\n        raise ValueError('dtype {} is not convertible to float.'.format(dtype))\n    return WeightedCategoricalColumn(categorical_column=categorical_column, weight_feature_key=weight_feature_key, dtype=dtype)",
    "docstring": "Applies weight values to a . Use this when each of your sparse inputs has both an ID and a value. For example, if you're representing text documents as a collection of word frequencies, you can provide 2 parallel sparse input features ('terms' and 'frequencies' below). Example: Input objects: This assumes the input dictionary contains a for key 'terms', and a for key 'frequencies'. These 2 tensors must have the same indices and dense shape. Args: categorical_column: A created by functions. weight_feature_key: String key for weight values. dtype: Type of weights, such as . Only float and integer weights are supported. Returns: A composed of two sparse features: one represents id, the other represents weight (value) of the id feature in that example. Raises: ValueError: if is not convertible to float.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:weighted_categorical_column arg:categorical_column arg:weight_feature_key arg:dtype arguments arg arg arg If BoolOp Compare BoolOp Raise Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "InfoLogRecordTranslator",
    "source_code": "class InfoLogRecordTranslator(SphinxLogRecordTranslator):\n    LogRecordClass = SphinxInfoLogRecord",
    "docstring": "LogRecordTranslator for INFO level log records.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:InfoLogRecordTranslator Assign"
  },
  {
    "library": "tensorflow",
    "name": "process_file",
    "source_code": "def process_file(self, in_filename, out_filename, no_change_to_outfile_on_error=False):\n    with open(in_filename, 'r') as in_file, tempfile.NamedTemporaryFile('w', delete=False) as temp_file:\n        ret = self.process_opened_file(in_filename, in_file, out_filename, temp_file)\n    if no_change_to_outfile_on_error and ret[0] == 0:\n        os.remove(temp_file.name)\n    else:\n        shutil.move(temp_file.name, out_filename)\n    return ret",
    "docstring": "Process the given python file for incompatible changes. Args: in_filename: filename to parse out_filename: output file to write to no_change_to_outfile_on_error: not modify the output file on errors Returns: A tuple representing number of files processed, log of actions, errors",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:process_file arg:self arg:in_filename arg:out_filename arg:no_change_to_outfile_on_error arguments arg arg arg arg With Call Call Assign Call If BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_initiate_login_uri",
    "source_code": "def validate_initiate_login_uri(self):\n    self._validate_uri('initiate_login_uri')",
    "docstring": "RI using the https scheme that a third party can use to initiate a login by the RP, as specified in Section 4 of OpenID Connect Core 1.0 [OpenID.Core]. The URI MUST accept requests via both GET and POST. The Client MUST understand the login_hint and iss parameters and SHOULD support the target_link_uri parameter.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_initiate_login_uri arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_currenttobest1",
    "source_code": "def _currenttobest1(self, candidate, samples):\n    r0, r1 = samples[..., :2].T\n    bprime = self.population[candidate] + self.scale * (self.population[0] - self.population[candidate] + self.population[r0] - self.population[r1])\n    return bprime",
    "docstring": "currenttobest1bin, currenttobest1exp",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_currenttobest1 arg:self arg:candidate arg:samples arguments arg arg arg Assign Assign Return return:yes"
  },
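A numpy sketch of the current-to-best/1 mutation, b' = x_c + F(x_best - x_c + x_r0 - x_r1). Row 0 stands in for the best population member, matching how the method indexes `self.population[0]`; the concrete indices are illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
population = rng.random((6, 3))   # row 0 plays the role of the best vector
scale = 0.7                       # mutation factor F
candidate, r0, r1 = 2, 4, 5

bprime = population[candidate] + scale * (
    population[0] - population[candidate]
    + population[r0] - population[r1]
)
print(bprime)
```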
  {
    "library": "tensorflow",
    "name": "_in_multi_worker_mode",
    "source_code": "def _in_multi_worker_mode(self):\n    strategy = self._distribution_strategy\n    if not strategy and distribute_lib.has_strategy():\n        strategy = distribute_lib.get_strategy()\n    return strategy and strategy.extended._in_multi_worker_mode()",
    "docstring": "Method to infer if this is working in multi-worker settings. Multi-worker training refers to the setup where the training is distributed across multiple workers, as opposed to the case where only a local process performs the training. This function is used to infer for example whether or not a distribute coordinator should be run, and thus TensorFlow servers should be started for communication with other servers in the cluster, or whether or not saving/restoring checkpoints is relevant for preemption fault tolerance. Experimental. Signature and implementation are subject to change. Returns: Whether this model indicates it's working in multi-worker settings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_in_multi_worker_mode arg:self arguments arg Assign If BoolOp Call Assign Call Return return:yes BoolOp Call"
  },
  {
    "library": "tensorflow",
    "name": "register_binary_elementwise_api",
    "source_code": "def register_binary_elementwise_api(func):\n    _BINARY_ELEMENTWISE_APIS.append(func)\n    for args, handler in _ELEMENTWISE_API_HANDLERS.items():\n        if len(args) == 2:\n            _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler)\n    return func",
    "docstring": "Decorator that registers a TensorFlow op as a binary elementwise API.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:register_binary_elementwise_api arg:func arguments arg Call For Call If Compare Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    neigh_dist, neigh_ind = self.radius_neighbors(X)\n    weights = _get_weights(neigh_dist, self.weights)\n    _y = self._y\n    if _y.ndim == 1:\n        _y = _y.reshape((-1, 1))\n    empty_obs = np.full_like(_y[0], np.nan)\n    if weights is None:\n        y_pred = np.array([np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs for i, ind in enumerate(neigh_ind)])\n    else:\n        y_pred = np.array([np.average(_y[ind, :], axis=0, weights=weights[i]) if len(ind) else empty_obs for i, ind in enumerate(neigh_ind)])\n    if np.any(np.isnan(y_pred)):\n        empty_warning_msg = 'One or more samples have no neighbors within specified radius; predicting NaN.'\n        warnings.warn(empty_warning_msg)\n    if self._y.ndim == 1:\n        y_pred = y_pred.ravel()\n    return y_pred",
    "docstring": "Predict the target for the provided data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_queries, n_features), or (n_queries, n_indexed) if metric == 'precomputed', or None Test samples. If , predictions for all indexed points are returned; in this case, points are not considered their own neighbors. Returns ------- y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=double Target values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_regression.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign Call Assign If Compare Assign Call Assign Call If Compare Assign Call Call Call Call Assign Call Call Call Call If Call Call Assign Call If Compare Assign Call Return return:yes"
  },
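The NaN-for-empty-neighborhood branch above is easy to trigger from the public estimator; a usage sketch:

```python
import numpy as np
from sklearn.neighbors import RadiusNeighborsRegressor

X = np.array([[0.0], [1.0], [2.0]])
y = np.array([0.0, 1.0, 2.0])

reg = RadiusNeighborsRegressor(radius=0.5).fit(X, y)
print(reg.predict(np.array([[1.1]])))   # [1.0]: only x=1 lies within 0.5
print(reg.predict(np.array([[10.0]])))  # [nan] plus the empty-radius warning
```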
  {
    "library": "tensorflow",
    "name": "on_batch_end",
    "source_code": "def on_batch_end(self, batch, logs=None):\n    logs = logs or {}\n    self._samples_seen += logs.get('size', 1)\n    samples_seen_since = self._samples_seen - self._samples_seen_at_last_write\n    if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:\n        batch_logs = {'batch_' + k: v for k, v in logs.items() if k not in ['batch', 'size', 'num_steps']}\n        self._write_custom_summaries(self._total_batches_seen, batch_logs)\n        self._samples_seen_at_last_write = self._samples_seen\n    self._total_batches_seen += 1\n    self._stop_profiler()",
    "docstring": "Writes scalar summaries for metrics on every training batch. Performs profiling if current batch is in profiler_batches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks_v1.py",
    "ast_data": "FunctionDef name:on_batch_end arg:self arg:batch arg:logs arguments arg arg arg Assign BoolOp Call Assign If BoolOp Compare Compare Assign Call Compare Call Assign Call"
  },
  {
    "library": "django",
    "name": "index",
    "source_code": "def index(self, field_name):\n    i = capi.get_field_index(self.ptr, force_bytes(field_name))\n    if i < 0:\n        raise IndexError('Invalid OFT field name given: %s.' % field_name)\n    return i",
    "docstring": "Return the index of the given field name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:index arg:self arg:field_name arguments arg arg Assign Call Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_from_proto",
    "source_code": "@classmethod\ndef _from_proto(cls, pb):\n    if pb == dataset_options_pb2.AutoShardPolicy.OFF:\n        return cls.OFF\n    if pb == dataset_options_pb2.AutoShardPolicy.FILE:\n        return cls.FILE\n    if pb == dataset_options_pb2.AutoShardPolicy.DATA:\n        return cls.DATA\n    if pb == dataset_options_pb2.AutoShardPolicy.AUTO:\n        return cls.AUTO\n    if pb == dataset_options_pb2.AutoShardPolicy.HINT:\n        return cls.HINT\n    raise ValueError(f'Invalid `pb.` Supported values include `OFF`, `FILE`, `DATA`,`AUTO`, and `HINT`. Got {pb}.')",
    "docstring": "Convert proto to enum.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "FunctionDef name:_from_proto arg:cls arg:pb arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "get_mask_mod",
    "source_code": "def get_mask_mod(self, mask_mod: Optional[_mask_mod_signature]) -> _mask_mod_signature:\n    if mask_mod is None:\n        mask_mod = noop_mask\n\n    def new_mask_mod(b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor, physical_kv_idx: torch.Tensor):\n        physical_kv_block = physical_kv_idx // self.page_size\n        physical_kv_offset = physical_kv_idx % self.page_size\n        logical_block_idx = self.physical_to_logical[b, physical_kv_block]\n        logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset\n        return torch.where(logical_block_idx >= 0, mask_mod(b, h, q_idx, logical_kv_idx), False)\n    return new_mask_mod",
    "docstring": "Converts a mask_mod based on mapping from the physical block index to the logical block index. Args: mask_mod (_mask_mod_signature): mask_mod based on the logical block index.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py",
    "ast_data": "FunctionDef name:get_mask_mod arg:self arg:mask_mod arguments arg arg If Compare Assign FunctionDef name:new_mask_mod arg:b arg:h arg:q_idx arg:physical_kv_idx arguments arg arg arg arg Assign Assign Assign Assign Return return:yes Call Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tree_is_leaf",
    "source_code": "def tree_is_leaf(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> bool:\n    return optree.tree_is_leaf(tree, is_leaf=is_leaf, none_is_leaf=True, namespace='torch')",
    "docstring": "Check if a pytree is a leaf. >>> tree_is_leaf(1) True >>> tree_is_leaf(None) True >>> tree_is_leaf([1, 2, 3]) False >>> tree_is_leaf((1, 2, 3), is_leaf=lambda x: isinstance(x, tuple)) True >>> tree_is_leaf({'a': 1, 'b': 2, 'c': 3}) False >>> tree_is_leaf({'a': 1, 'b': 2, 'c': None}) False Args: tree (pytree): A pytree to check if it is a leaf node. is_leaf (callable, optional): An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `True`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: A boolean indicating if the pytree is a leaf node.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_is_leaf arg:tree arg:is_leaf arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_export",
    "source_code": "def _export(self, op: onnx.ModelProto, file_path: str, **kwargs: Any) -> None:\n    onnx.save(op, file_path, **kwargs)",
    "docstring": "Export the combined ONNX model to a file. Args: op: onnx operation. file_path: The file path to export the combined ONNX model. kwargs: Additional arguments to save onnx model.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:_export arg:self arg:op arg:file_path arguments arg arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_set_node_metadata",
    "source_code": "def _set_node_metadata(fx_node: torch.fx.Node, ir_node: ir.Node) -> None:\n    namespace, class_hierarchy, name_scopes = _get_node_namespace(fx_node)\n    ir_node.metadata_props['namespace'] = namespace\n    ir_node.metadata_props['pkg.torch.onnx.class_hierarchy'] = repr(class_hierarchy)\n    ir_node.metadata_props['pkg.torch.onnx.name_scopes'] = repr(name_scopes)\n    ir_node.metadata_props['pkg.torch.onnx.fx_node'] = str(fx_node.format_node())\n    ir_node.metadata_props['pkg.torch.onnx.stack_trace'] = fx_node.meta.get('stack_trace', '')",
    "docstring": "Adds namespace and other node metadata to the ONNX node.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_set_node_metadata arg:fx_node arg:ir_node arguments arg arg Assign Call Assign Assign Call Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "Hansen",
    "source_code": "class Hansen(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-7.58989583, -7.70831466]]\n        self.fglob = -176.54179\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = arange(5.0)\n        a = (i + 1) * cos(i * x[0] + i + 1)\n        b = (i + 1) * cos((i + 2) * x[1] + i + 1)\n        return sum(a) * sum(b)",
    "docstring": "Hansen objective function. This class defines the Hansen [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Hansen}}(x) = \\left[ \\sum_{i=0}^4(i+1)\\cos(ix_1+i+1)\\right ] \\left[\\sum_{j=0}^4(j+1)\\cos[(j+2)x_2+j+1])\\right ] with :math: for :math:. *Global optimum*: :math: for :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Jamil #61 is missing the starting value of i.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_H.py",
    "ast_data": "ClassDef name:Hansen FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
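A direct numpy evaluation of the Hansen objective at the documented optimum; the value should land near fglob = -176.54179:

```python
import numpy as np

def hansen(x):
    i = np.arange(5.0)
    a = (i + 1) * np.cos(i * x[0] + i + 1)
    b = (i + 1) * np.cos((i + 2) * x[1] + i + 1)
    return np.sum(a) * np.sum(b)

print(hansen([-7.58989583, -7.70831466]))  # ~ -176.5418
```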
  {
    "library": "pytorch",
    "name": "_new_shared",
    "source_code": "@classmethod\ndef _new_shared(cls, size, *, device='cpu'):\n    from torch.multiprocessing import get_sharing_strategy\n    device = torch.device(device)\n    if device.type in ['cuda', torch._C._get_privateuse1_backend_name(), 'hpu']:\n        return cls(size, device=device)\n    elif get_sharing_strategy() == 'file_system':\n        return cls._new_using_filename_cpu(size)\n    else:\n        return cls._new_using_fd_cpu(size)",
    "docstring": "Create a new storage in shared memory with the same data type.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:_new_shared arg:cls arg:size arguments arg arg arg Assign Call If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    powers = self.powers_\n    input_features = _check_feature_names_in(self, input_features)\n    feature_names = []\n    for row in powers:\n        inds = np.where(row)[0]\n        if len(inds):\n            name = ' '.join(('%s^%d' % (input_features[ind], exp) if exp != 1 else input_features[ind] for ind, exp in zip(inds, row[inds])))\n        else:\n            name = '1'\n        feature_names.append(name)\n    return np.asarray(feature_names, dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If , then is used as feature names in. If is not defined, then the following input feature names are generated: . - If is an array-like, then must match if is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_polynomial.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Assign Assign Call Assign For Assign Call If Call Assign Call Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_zero_in_bounds",
    "source_code": "def _zero_in_bounds(self):\n    vmin, vmax = self._axes.yaxis._scale.limit_range_for_scale(0, 1, 1e-05)\n    return vmin == 0",
    "docstring": "Return True if zero is within the valid values for the scale of the radial axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:_zero_in_bounds arg:self arguments arg Assign Call Return return:yes Compare"
  },
  {
    "library": "scikit-learn",
    "name": "weight_intercept_raw",
    "source_code": "def weight_intercept_raw(self, coef, X):\n    weights, intercept = self.weight_intercept(coef)\n    if not self.base_loss.is_multiclass:\n        raw_prediction = X @ weights + intercept\n    else:\n        raw_prediction = X @ weights.T + intercept\n    return (weights, intercept, raw_prediction)",
    "docstring": "Helper function to get coefficients, intercept and raw_prediction. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order=\"F\"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. Returns ------- weights : ndarray of shape (n_features,) or (n_classes, n_features) Coefficients without intercept term. intercept : float or ndarray of shape (n_classes,) Intercept terms. raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py",
    "ast_data": "FunctionDef name:weight_intercept_raw arg:self arg:coef arg:X arguments arg arg arg Assign Call If Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ConsolidatedOptimState",
    "source_code": "@dataclass\nclass _ConsolidatedOptimState:\n    tensor_state: dict[str, torch.Tensor] = field(default_factory=dict)\n    zero_dim_tensor_state: dict[str, torch.Tensor] = field(default_factory=dict)\n    non_tensor_state: dict[str, Any] = field(default_factory=dict)",
    "docstring": "This holds the consolidated optimizer state on the target rank. Positive- dimension tensor state is communicated across ranks, while zero-dimension tensor state and non-tensor state is taken directly from the target rank. PyTorch version 1.12 moved to using zero-dimension tensors for scalar values, but user implemented optimizers may still use float (i.e. a non-tensor). Thus, we support both and handle them identically. Attributes: tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension tensor state name to the unsharded flat tensor representing the state. zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero- dimension tensor state name to its value. non_tensor_state (Dict[str, Any]): Mapping from non-tensor state name to its value.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_optim_utils.py",
    "ast_data": "ClassDef name:_ConsolidatedOptimState Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sharding_specs: List[str], mesh: Mesh):\n    if not isinstance(mesh, Mesh):\n        raise ValueError('mesh is not a valid Mesh object.')\n    for _, dim_sharding in enumerate(sharding_specs):\n        if dim_sharding == UNSHARDED or dim_sharding == MATCH:\n            continue\n        if sharding_specs.count(dim_sharding) > 1:\n            raise ValueError(('Mesh dimension {mesh_dim} was repeated in sharding ' + 'specification {sharding_specs}. Mesh dimensions must be unique ' + 'in a layout.').format(mesh_dim=dim_sharding, sharding_specs=sharding_specs))\n        if dim_sharding not in mesh:\n            raise ValueError(('{dim_sharding}: A dimension sharding must either be a ' + 'valid mesh dimension or UNSHARDED.').format(dim_sharding=dim_sharding))\n    super().__init__(type=LayoutType.STATIC, sharding_specs=sharding_specs, mesh=mesh)",
    "docstring": "Builds a Layout from a list of dimension names and a Mesh. Args: sharding_specs: List of sharding specifications, each corresponding to a tensor axis. Each specification (dim_sharding) can either be a mesh dimension or the special value UNSHARDED. mesh: A mesh configuration for the Tensor. Returns: A valid Layout built with given layout & mesh.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sharding_specs arg:mesh arguments arg arg arg If Call Raise Call For Call If BoolOp Compare Compare If Compare Call Raise Call Call If Compare Raise Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    if isinstance(X, str):\n        raise ValueError('Iterable over raw text documents expected, string object received.')\n    self._warn_for_unused_params()\n    self._validate_ngram_range()\n    self._get_hasher().fit(X, y=y)\n    return self",
    "docstring": "Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object HashingVectorizer instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Call Raise Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_ps_failure",
    "source_code": "def _is_ps_failure(error):\n    if isinstance(error, PSUnavailableError):\n        return True\n    if isinstance(error, (ClosureInputError, ClosureAbortedError)):\n        error = error.original_exception\n    if _RPC_ERROR_FROM_PS not in str(error):\n        return False\n    if isinstance(error, (errors.UnavailableError, errors.AbortedError)):\n        return True\n    if isinstance(error, errors.InvalidArgumentError):\n        if 'unknown device' in str(error).lower() or 'Unable to find the relevant tensor remote_handle' in str(error):\n            return True\n    return False",
    "docstring": "Whether the error is considered a parameter server failure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_is_ps_failure arg:error arguments arg If Call Return return:yes If Call Assign If Compare Call Return return:yes If Call Return return:yes If Call If BoolOp Compare Call Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "validate_html_favicon",
    "source_code": "def validate_html_favicon(app: Sphinx, config: Config) -> None:\n    if config.html_favicon and (not (app.confdir / config.html_favicon).is_file()) and (not is_url(config.html_favicon)):\n        logger.warning(__('favicon file %r does not exist'), config.html_favicon)\n        config.html_favicon = None",
    "docstring": "Check html_favicon setting.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py",
    "ast_data": "FunctionDef name:validate_html_favicon arg:app arg:config arguments arg arg If BoolOp Call Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "__truediv__",
    "source_code": "def __truediv__(self, other):\n    if not self._check_binop_other(other) or isinstance(other, StateSpace):\n        return NotImplemented\n    if isinstance(other, np.ndarray) and other.ndim > 0:\n        raise ValueError('Cannot divide StateSpace by non-scalar numpy arrays')\n    return self.__mul__(1 / other)",
    "docstring": "Divide by a scalar",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__truediv__ arg:self arg:other arguments arg arg If BoolOp Call Call Return return:yes If BoolOp Call Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "reset_option",
    "source_code": "def reset_option(pat: str) -> None:\n    keys = _select_options(pat)\n    if len(keys) == 0:\n        raise OptionError(f'No such keys(s) for pat={pat!r}')\n    if len(keys) > 1 and len(pat) < 4 and (pat != 'all'):\n        raise ValueError('You must specify at least 4 characters when resetting multiple keys, use the special keyword \"all\" to reset all the options to their default value')\n    for k in keys:\n        set_option(k, _registered_options[k].defval)",
    "docstring": "Reset one or more options to their default value. This method resets the specified pandas option(s) back to their default values. It allows partial string matching for convenience, but users should exercise caution to avoid unintended resets due to changes in option names in future versions. Parameters ---------- pat : str/regex If specified only options matching `User Guide `. Examples -------- >>> pd.reset_option(\"display.max_columns\") # doctest: +SKIP",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:reset_option arg:pat arguments arg Assign Call If Compare Call Raise Call If BoolOp Compare Call Compare Call Compare Raise Call For Call"
  },
  {
    "library": "pytorch",
    "name": "_send_requests",
    "source_code": "def _send_requests(self):\n    fake_data = torch.randn(self.batch_size, 3, 250, 250, requires_grad=False)\n    other_data = [torch.randn(self.batch_size, 3, 250, 250, requires_grad=False) for i in range(self.num_iters)]\n    self.request_queue.put((fake_data, time.time()))\n    self.read_requests_event.set()\n    self.warmup_event.wait()\n    self.read_requests_event.set()\n    self.start_send_time = time.time()\n    for i in range(self.num_iters):\n        self.request_queue.put((other_data[i], time.time()))",
    "docstring": "This function will send one warmup request, and then num_iters requests to the backend process.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\inference\\server.py",
    "ast_data": "FunctionDef name:_send_requests arg:self arguments arg Assign Call Assign Call Call Call Call Call Call Call Assign Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_rocm_install_path",
    "source_code": "def _get_rocm_install_path():\n    rocm_install_path = _get_default_rocm_path()\n    if 'ROCM_PATH' in os.environ:\n        rocm_install_path = os.environ['ROCM_PATH']\n    return rocm_install_path",
    "docstring": "Determines and returns the ROCm installation path.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_rocm_config.py",
    "ast_data": "FunctionDef name:_get_rocm_install_path arguments Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_foreach_map",
    "source_code": "@register_lowering(torch._higher_order_ops._foreach_map, type_promotion_kind=None)\ndef _foreach_map(subgraph, *args, **kwargs):\n    from .subgraph_lowering import PointwiseSubgraphLowering\n    inputs = args\n    gm = subgraph.graph_module\n    pw_subgraph = PointwiseSubgraphLowering(gm, root_graph_lowering=V.graph)\n    with V.set_graph_handler(pw_subgraph):\n        pw_subgraph.run(*inputs)\n    sub_outputs = pw_subgraph.graph_outputs\n    assert sub_outputs\n    groups = group_foreach_args(sub_outputs)\n    outputs = [None] * len(sub_outputs)\n    for (device, use_foreach), group in groups.items():\n        operation_list: list[str] = []\n        for output_ind, output in group:\n            outputs[output_ind] = output\n            if V.graph.has_feature(device, BackendFeature.FOREACH) and use_foreach:\n                output.realize()\n                operation_list.append(output.get_operation_name())\n        if operation_list:\n            V.graph.register_operation_list(operation_list)\n    assert all((x is not None for x in outputs))\n    return outputs",
    "docstring": "This lowers an invocation of foreach_map The way this works is that an arbitrary N-arg func is provided by the user, looped over by the polyfill with the same semantics as a foreach op (a loop applying an n-ary function to n args) and then traced into a subgraph by dynamo. This code allows us to inline the subgraph into the main graph lowering using the PontwiseSubgraphLowering. The graph outputs represent the vertically fused sequence of ops, and then register_operation_list below registers the buffers as horizontally fuseable in the scheduler.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:_foreach_map arg:subgraph arguments arg arg arg Assign Assign Assign Call With Call Call Assign Assign Call Assign Call For Call For Assign If BoolOp Call Call Call Call If Call Call Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_check_flatten_did_not_remove",
    "source_code": "def _check_flatten_did_not_remove(original, jit_flattened):\n\n    def flatten(x):\n        if isinstance(x, (list, tuple)):\n            for inner in x:\n                yield from flatten(inner)\n        elif isinstance(x, dict):\n            for inner in x.values():\n                yield from flatten(inner)\n        else:\n            yield x\n    flattened_with_none = list(flatten(original))\n    num_none = len(flattened_with_none) - len(jit_flattened)\n    assert num_none >= 0\n    if num_none:\n        raise ValueError(f\"args contained {num_none} None's after flattening. When exporting a ScriptModule or ScriptFunction, no args may be None because that breaks type propagation.\")",
    "docstring": "torch.jit._flatten removes None. Check if it did so in this case.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\utils.py",
    "ast_data": "FunctionDef name:_check_flatten_did_not_remove arg:original arg:jit_flattened arguments arg arg FunctionDef name:flatten arg:x arguments arg If Call For Call If Call For Call Call Assign Call Call Assign Call Call Compare If Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "paired_cosine_distances",
    "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'Y': ['array-like', 'sparse matrix']}, prefer_skip_nested_validation=True)\ndef paired_cosine_distances(X, Y):\n    X, Y = check_paired_arrays(X, Y)\n    return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True)",
    "docstring": "Compute the paired cosine distances between X and Y. Read more in the :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) An array where each row is a sample and each column is a feature. Y : {array-like, sparse matrix} of shape (n_samples, n_features) An array where each row is a sample and each column is a feature. Returns ------- distances : ndarray of shape (n_samples,) Returns the distances between the row vectors of and the row vectors of , where is the distance between and . Notes ----- The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm. Examples -------- >>> from sklearn.metrics.pairwise import paired_cosine_distances >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> paired_cosine_distances(X, Y) array([0.5 , 0.184])",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:paired_cosine_distances arg:X arg:Y arguments arg arg Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "select_columns_by_name",
    "source_code": "@abstractmethod\ndef select_columns_by_name(self, names: Sequence[str]) -> DataFrame:\n    pass",
    "docstring": "Create a new DataFrame by selecting a subset of columns by name.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py",
    "ast_data": "FunctionDef name:select_columns_by_name arg:self arg:names arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "run_and_get_constant_graph",
    "source_code": "def run_and_get_constant_graph(gm: torch.fx.GraphModule) -> torch.fx.GraphModule:\n    constant_graph_tag(gm)\n    for node in gm.graph.find_nodes(op='get_attr'):\n        used_to_fold = False\n        for u in node.users:\n            if u.meta[META_TAG] == CONST_MODULE_TAG:\n                used_to_fold = True\n                break\n        if not used_to_fold:\n            node.meta[META_TAG] = MODULE_TAG\n    new_graph = torch.fx.Graph()\n    node_remapping: dict[torch.fx.Node, torch.fx.Node] = {}\n    output_nodes = []\n    for node in gm.graph.nodes:\n        if node.meta[META_TAG] == MODULE_TAG:\n            continue\n        new_node = new_graph.node_copy(node, lambda x: node_remapping[x])\n        node_remapping[node] = new_node\n        for user in node.users:\n            if user.meta[META_TAG] == MODULE_TAG:\n                output_nodes.append(new_node)\n                break\n    new_graph.output(tuple(output_nodes))\n    new_graph.lint()\n    new_gm = torch.fx.GraphModule(gm, new_graph)\n    return new_gm",
    "docstring": "Construct a GraphModule which corresponds to the part which could be constant folded in provided gm.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\constant_folding.py",
    "ast_data": "FunctionDef name:run_and_get_constant_graph arg:gm arguments arg Call For Call Assign For If Compare Assign If Assign Assign Call Assign For If Compare Assign Call arguments arg Assign For If Compare Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "recalculate_batch_size",
    "source_code": "def recalculate_batch_size(type_spec):\n    output_shape = type_spec._to_legacy_output_shapes()\n    if not isinstance(output_shape, tensor_shape.TensorShape):\n        return None\n    if output_shape.rank is None:\n        return None\n    if len(output_shape) < 1:\n        raise ValueError('Invalid `input_dataset`. Expected a dataset whose elements have rank >= 1 but found a dataset whose elements are scalars. Fix the issue by adding the `batch` transformation to the dataset.')\n    output_dims = [d.value for d in output_shape.dims]\n    if output_dims[0] is not None and output_dims[0] % num_replicas == 0:\n        return output_dims[0] // num_replicas\n    return None",
    "docstring": "Recalculates the output_shape after dividing it by num_replicas.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\distribute.py",
    "ast_data": "FunctionDef name:recalculate_batch_size arg:type_spec arguments arg Assign Call If Call Return return:no If Compare Return return:no If Compare Call Raise Call Assign If BoolOp Compare Compare Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "to_dict",
    "source_code": "def to_dict(self) -> dict[str, Any]:\n    d: dict[str, Any] = {}\n    for quant_type, observed_to_quantized_mapping in self.observed_to_quantized_mapping.items():\n        if OBSERVED_TO_QUANTIZED_DICT_KEY not in d:\n            d[OBSERVED_TO_QUANTIZED_DICT_KEY] = {}\n        d[OBSERVED_TO_QUANTIZED_DICT_KEY][_get_quant_type_to_str(quant_type)] = observed_to_quantized_mapping\n    if len(self.preserved_attributes) > 0:\n        d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes\n    return d",
    "docstring": "Convert this `~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg For Call If Compare Assign Assign Call If Compare Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "self_adjoint_eig",
    "source_code": "@tf_export('linalg.eigh', v1=['linalg.eigh', 'self_adjoint_eig'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('self_adjoint_eig')\ndef self_adjoint_eig(tensor, name=None):\n    e, v = gen_linalg_ops.self_adjoint_eig_v2(tensor, compute_v=True, name=name)\n    return (e, v)",
    "docstring": "Computes the eigen decomposition of a batch of self-adjoint matrices. Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in such that , for i=0...N-1. Args: tensor: of shape . Only the lower triangular part of each inner inner matrix is referenced. name: string, optional name of the operation. Returns: e: Eigenvalues. Shape is . Sorted in non-decreasing order. v: Eigenvectors. Shape is . The columns of the inner most matrices contain eigenvectors of the corresponding matrices in",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_ops.py",
    "ast_data": "FunctionDef name:self_adjoint_eig arg:tensor arg:name arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "DynamicMetaLoadPlanner",
    "source_code": "class DynamicMetaLoadPlanner(DefaultLoadPlanner):\n\n    def set_up_planner(self, state_dict: STATE_DICT_TYPE, metadata: Optional[Metadata]=None, is_coordinator: bool=False) -> None:\n        super().set_up_planner(state_dict, metadata, is_coordinator)\n        state_dict_metadata: dict[str, STORAGE_TYPES] = {}\n        for key, tensor in self.state_dict.items():\n            if not torch.is_tensor(tensor):\n                raise RuntimeError(f'Non-tensor value identified at {key}. At this time {type(self).__name__} only supports loading Tensors.')\n            state_dict_metadata[key] = TensorStorageMetadata(TensorProperties(dtype=tensor.dtype), tensor.size(), _create_chunk_list(tensor))\n        self.metadata = Metadata(state_dict_metadata=state_dict_metadata)",
    "docstring": "Extension of DefaultLoadPlanner, which creates a new Metadata object based on the passed in state dict, avoiding the need to read metadata from disk. This is useful when reading formats which don't have a metadata file, like Torch Save files. . N.B. Intended to be used with BroadcastingTorchSaveReader .. warning:: Current implementation only supports loading Tensors. >>> # xdoctest: +SKIP(\"undefined vars\") >>> sd = {\"mode\": model} >>> dcp.load( >>> sd, >>> storage_reader=BroadcastingTorchSaveReader(), >>> planner=DynamicMetaLoadPlanner(), >>> checkpoint_id=\"path_to_model.pt\" >>> )",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "ClassDef name:DynamicMetaLoadPlanner FunctionDef name:set_up_planner arg:self arg:state_dict arg:metadata arg:is_coordinator arguments arg arg arg arg Call Call For Call If Call Raise Call Call Assign Call Call Call Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "precompute_warp_grid",
    "source_code": "def precompute_warp_grid(self, src_homo_dst: Tensor) -> None:\n    self._warped_grid = warp_grid(self.grid, src_homo_dst)",
    "docstring": "Compute and store internally the transformations of the points. Useful when the same homography/homographies are reused. Args: src_homo_dst: Homography or homographies (stacked) to transform all points in the grid. Shape of the homography has to be :math: or :math:. The homography assumes normalized coordinates [-1, 1] if normalized_coordinates is True.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\transform\\homography_warper.py",
    "ast_data": "FunctionDef name:precompute_warp_grid arg:self arg:src_homo_dst arguments arg arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "__getattr__",
    "source_code": "def __getattr__(self, attr):\n    if attr != 'meta' and attr in self.meta:\n        return self.meta[attr]\n    else:\n        raise AttributeError(f\"'{attr}' not in metadata\")",
    "docstring": "Dispatch attribute access to the metadata dictionary.",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:__getattr__ arg:self arg:attr arguments arg arg If BoolOp Compare Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "create_slot",
    "source_code": "def create_slot(primary, val, name, colocate_with_primary=True, *, copy_xla_sharding=False):\n    validate_shape = val.get_shape().is_fully_defined()\n    if isinstance(primary, variables.Variable):\n        prefix = primary._shared_name\n    else:\n        prefix = primary.op.name\n    with variable_scope.variable_scope(None, prefix + '/' + name):\n        if colocate_with_primary:\n            distribution_strategy = distribute_lib.get_strategy()\n            with distribution_strategy.extended.colocate_vars_with(primary):\n                return _create_slot_var(primary, val, '', validate_shape, None, None, copy_xla_sharding=copy_xla_sharding)\n        else:\n            return _create_slot_var(primary, val, '', validate_shape, None, None, copy_xla_sharding=copy_xla_sharding)",
    "docstring": "Create a slot initialized to the given value. The type of the slot is determined by the given value. Args: primary: The primary or . val: A specifying the initial value of the slot. name: Name to use for the slot variable. colocate_with_primary: Boolean. If True the slot is located on the same device as . copy_xla_sharding: Boolean. If True also copies XLA sharding from primary. Returns: A object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\slot_creator.py",
    "ast_data": "FunctionDef name:create_slot arg:primary arg:val arg:name arg:colocate_with_primary arguments arg arg arg arg arg Assign Call Call If Call Assign Assign With Call If Assign Call With Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_munp",
    "source_code": "def _munp(self, n, beta, m):\n    N = 1.0 / (m / beta / (m - 1) * np.exp(-beta ** 2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))\n\n    def n_th_moment(n, beta, m):\n        A = (m / beta) ** m * np.exp(-beta ** 2 / 2.0)\n        B = m / beta - beta\n        rhs = 2 ** ((n - 1) / 2.0) * sc.gamma((n + 1) / 2) * (1.0 + (-1) ** n * sc.gammainc((n + 1) / 2, beta ** 2 / 2))\n        lhs = np.zeros(rhs.shape)\n        for k in range(int(n) + 1):\n            lhs += sc.binom(n, k) * B ** (n - k) * (-1) ** k / (m - k - 1) * (m / beta) ** (-m + k + 1)\n        return A * lhs + rhs\n    return N * xpx.apply_where(n + 1 < m, (n, beta, m), np.vectorize(n_th_moment, otypes=[np.float64]), fill_value=np.inf)",
    "docstring": "Returns the n-th non-central moment of the crystalball function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_munp arg:self arg:n arg:beta arg:m arguments arg arg arg arg Assign Call Call FunctionDef name:n_th_moment arg:n arg:beta arg:m arguments arg arg arg Assign Call Assign Assign Call Call Assign Call For Call Call Call Return return:yes Return return:yes Call Compare Call"
  },
  {
    "library": "scipy",
    "name": "tol",
    "source_code": "@property\ndef tol(self):\n    return self._tol",
    "docstring": "positive float: The desired relative tolerance of calculations. Left unspecified, calculations may be faster; when provided, calculations may be more likely to meet the desired accuracy.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "FunctionDef name:tol arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_nan",
    "source_code": "def is_nan(x):\n    if not isinstance(x, numbers.Number):\n        return False\n    if isinstance(x, complex):\n        return math.isnan(x.real) or math.isnan(x.imag)\n    else:\n        return math.isnan(x)",
    "docstring": "Checks if given value is a Python NaN.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\trace_type\\default_types.py",
    "ast_data": "FunctionDef name:is_nan arg:x arguments arg If Call Return return:yes If Call Return return:yes BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Ripple25",
    "source_code": "class Ripple25(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0.1 for _ in range(self.N)]]\n        self.fglob = -2.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = -2.0 * log(2.0) * ((x - 0.1) / 0.8) ** 2.0\n        v = sin(5.0 * pi * x) ** 6.0\n        return sum(-exp(u) * v)",
    "docstring": "Ripple 25 objective function. This class defines the Ripple 25 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ripple25}}(x) = \\sum_{i=1}^2 -e^{-2 \\log 2 (\\frac{x_i-0.1}{0.8})^2} \\left[\\sin^6(5 \\pi x_i) \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:Ripple25 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_ticklabels",
    "source_code": "def get_ticklabels(self, minor=False, which=None):\n    if which is not None:\n        if which == 'minor':\n            return self.get_minorticklabels()\n        elif which == 'major':\n            return self.get_majorticklabels()\n        elif which == 'both':\n            return self.get_majorticklabels() + self.get_minorticklabels()\n        else:\n            _api.check_in_list(['major', 'minor', 'both'], which=which)\n    if minor:\n        return self.get_minorticklabels()\n    return self.get_majorticklabels()",
    "docstring": "Get this Axis' tick labels. Parameters ---------- minor : bool Whether to return the minor or the major ticklabels. which : None, ('minor', 'major', 'both') Overrides *minor*. Selects which ticklabels to return Returns ------- list of",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_ticklabels arg:self arg:minor arg:which arguments arg arg arg If Compare If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Call Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._implementation._dtype",
    "docstring": "The data type of this TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Hints",
    "source_code": "class _Hints:\n\n    def __init__(self, is_non_singular=None, is_positive_definite=None, is_self_adjoint=None):\n        self.is_non_singular = is_non_singular\n        self.is_positive_definite = is_positive_definite\n        self.is_self_adjoint = is_self_adjoint",
    "docstring": "Holds 'is_X' flags that every LinearOperator is initialized with.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "ClassDef name:_Hints FunctionDef name:__init__ arg:self arg:is_non_singular arg:is_positive_definite arg:is_self_adjoint arguments arg arg arg arg Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "tocoo",
    "source_code": "def tocoo(self, copy=False):\n    return self.tocsr(copy=False).tocoo(copy=copy)",
    "docstring": "Convert this array/matrix to COOrdinate format. With copy=False, the data/indices may be shared between this array/matrix and the resultant coo_array/matrix.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "FunctionDef name:tocoo arg:self arg:copy arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "serialize_sparse_v2",
    "source_code": "@tf_export('io.serialize_sparse', v1=[])\n@dispatch.add_dispatch_support\ndef serialize_sparse_v2(sp_input, out_type=dtypes.string, name=None):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    return gen_sparse_ops.serialize_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type)",
    "docstring": "Serialize a into a 3-vector (1-D ) object. Args: sp_input: The input . out_type: The to use for serialization. name: A name prefix for the returned tensors (optional). Returns: A 3-vector (1-D ), with each column representing the serialized 's indices, values, and shape (respectively). Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:serialize_sparse_v2 arg:sp_input arg:out_type arg:name arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_parse_accept_lang_header",
    "source_code": "@functools.lru_cache(maxsize=1000)\ndef _parse_accept_lang_header(lang_string):\n    result = []\n    pieces = accept_language_re.split(lang_string.lower())\n    if pieces[-1]:\n        return ()\n    for i in range(0, len(pieces) - 1, 3):\n        first, lang, priority = pieces[i:i + 3]\n        if first:\n            return ()\n        if priority:\n            priority = float(priority)\n        else:\n            priority = 1.0\n        result.append((lang, priority))\n    result.sort(key=lambda k: k[1], reverse=True)\n    return tuple(result)",
    "docstring": "Parse the lang_string, which is the body of an HTTP Accept-Language header, and return a tuple of (lang, q-value), ordered by 'q' values. Return an empty tuple if there are any format errors in lang_string.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:_parse_accept_lang_header arg:lang_string arguments arg Assign Assign Call Call If Return return:no For Call Call Assign If Return return:no If Assign Call Assign Call Call arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_prepare",
    "source_code": "def _prepare(self):\n    pass",
    "docstring": "Create all needed tensors before applying gradients. This is called with the name_scope using the \"name\" that users have chosen for the application of gradients.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_prepare arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "deprecate_option",
    "source_code": "def deprecate_option(key: str, msg: str | None=None, rkey: str | None=None, removal_ver: str | None=None) -> None:\n    key = key.lower()\n    if key in _deprecated_options:\n        raise OptionError(f\"Option '{key}' has already been defined as deprecated.\")\n    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)",
    "docstring": "Mark option as deprecated, if code attempts to access this option, a warning will be produced, using if given, or a default message if not. if is given, any access to the key will be re-routed to . Neither the existence of nor that if is checked. If they do not exist, any subsequence access will fail as usual, after the deprecation warning is given. Parameters ---------- key : str Name of the option to be deprecated. must be a fully-qualified option name (e.g \"x.y.z.rkey\"). msg : str, optional Warning message to output when the key is referenced. if no message is given a default message will be emitted. rkey : str, optional Name of an option to reroute access to. If specified, any referenced will be re-routed to including set/get/reset. rkey must be a fully-qualified option name (e.g \"x.y.z.rkey\"). used by the default message if no is specified. removal_ver : str, optional Specifies the version in which this option will be removed. used by the default message if no is specified. Raises ------ OptionError If the specified key has already been deprecated.",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:deprecate_option arg:key arg:msg arg:rkey arg:removal_ver arguments arg arg arg arg Assign Call If Compare Raise Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "format",
    "source_code": "def format(self, name: str, roffset=True) -> str:\n\n    def remove_roffsets(expr: sympy.Expr) -> sympy.Expr:\n        for symt in TritonSymbols.reduction_types:\n            expr = self.replace_offset(expr, sympy.Integer(0), symt)\n        return expr\n    f = V.kernel.index_to_str\n    offsets = [*self.offsets]\n    if not roffset:\n        offsets = [remove_roffsets(offset) for offset in offsets]\n    args = [f'{name} + ({f(self.constant_offset)})' if self.constant_offset != 0 else name, f'shape={f(self.shape)}', f'strides={f(self.strides)}', f'block_shape={f(self.block_shape)}', f'order={f(self.order)}', f'offsets={f(offsets)}']\n    return f'tl.make_block_ptr({', '.join(args)})'",
    "docstring": "Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should rn_offset be included in offsets=..., for use with tl.advance() Returns: \"tl.make_block_ptr(...)\"",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:format arg:self arg:name arg:roffset arguments arg arg arg FunctionDef name:remove_roffsets arg:expr arguments arg For Assign Call Call Return return:yes Assign Assign If Assign Call Assign Compare Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_iter_connected_components",
    "source_code": "def _iter_connected_components(self):\n    if self.codes is None:\n        yield self\n    else:\n        idxs = np.append((self.codes == Path.MOVETO).nonzero()[0], len(self.codes))\n        for sl in map(slice, idxs, idxs[1:]):\n            yield Path._fast_from_codes_and_verts(self.vertices[sl], self.codes[sl], self)",
    "docstring": "Return subpaths split at MOVETOs.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:_iter_connected_components arg:self arguments arg If Compare Assign Call Call Compare Call For Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, kernel_name: str, runtime_arg_info: list['ArgInfo'], runtime_arg_values: list[Any]) -> None:\n    super().__init__()\n    self.kernel_name = kernel_name\n    self.named_nodes: dict[str, IRNode] = {}\n    self.runtime_arg_info = runtime_arg_info\n    self.runtime_arg_values = runtime_arg_values",
    "docstring": "Initializes a new instance of the ROCmTemplateKernel class. Args: kernel_name (str): The name of the kernel.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_kernel.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:kernel_name arg:runtime_arg_info arg:runtime_arg_values arguments arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "vertices",
    "source_code": "@property\ndef vertices(self):\n    return self._vertices",
    "docstring": "The vertices of the as an (N, 2) array.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:vertices arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "same_ordered",
    "source_code": "@staticmethod\ndef same_ordered(sizes, stride):\n    assert len(sizes) == len(stride)\n    stride = [V.graph.sizevars.size_hint(x) for x in stride]\n    fill_order = sorted(range(len(stride)), key=stride.__getitem__)\n    return FlexibleLayout.fill_ordered(sizes, fill_order)",
    "docstring": "Create a stride that has the same stride order as given stride For example, if given stride is [1000, 1, 100, 10], the fill order should be [1, 3, 2, 0]",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:same_ordered arg:sizes arg:stride arguments arg arg Compare Call Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_select_best_index",
    "source_code": "@staticmethod\ndef _select_best_index(refit, refit_metric, results):\n    if callable(refit):\n        best_index = refit(results)\n        if not isinstance(best_index, numbers.Integral):\n            raise TypeError('best_index_ returned is not an integer')\n        if best_index < 0 or best_index >= len(results['params']):\n            raise IndexError('best_index_ index out of range')\n    else:\n        best_index = results[f'rank_test_{refit_metric}'].argmin()\n    return best_index",
    "docstring": "Select index of the best combination of hyperparemeters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:_select_best_index arg:refit arg:refit_metric arg:results arguments arg arg arg If Call Assign Call If Call Raise Call If BoolOp Compare Compare Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "delete_file",
    "source_code": "@tf_export(v1=['gfile.Remove'])\ndef delete_file(filename):\n    delete_file_v2(filename)",
    "docstring": "Deletes the file located at 'filename'. Args: filename: string, a filename Raises: errors.OpError: Propagates any errors reported by the FileSystem API. E.g., if the file does not exist.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:delete_file arg:filename arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_agg_filter",
    "source_code": "def get_agg_filter(self):\n    return self._agg_filter",
    "docstring": "Return filter function to be used for agg filter.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_agg_filter arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_packs=1):\n    if num_packs < 0:\n        raise ValueError('HierarchicalCopy requires num_packs >= 0, but {} is specified'.format(num_packs))\n    super(HierarchicalCopyAllReduce, self).__init__(all_reduce_alg='hierarchical_copy', num_packs=num_packs)",
    "docstring": "Initializes the object. Args: num_packs: a non-negative integer. The number of packs to split values into. If zero, no packing will be done. Raises: ValueError if is negative.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_packs arguments arg arg If Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "RandomNormal",
    "source_code": "class RandomNormal(Initializer):\n\n    def __init__(self, mean=0.0, stddev=0.05, seed=None):\n        self.mean = mean\n        self.stddev = stddev\n        self.seed = seed\n        self._random_generator = _RandomGenerator(seed)\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        _validate_kwargs(self.__class__.__name__, kwargs)\n        dtype = _assert_float_dtype(_get_dtype(dtype))\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)\n\n    def get_config(self):\n        return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed}",
    "docstring": "Initializer that generates tensors with a normal distribution. Also available via the shortcut function . Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:RandomNormal FunctionDef name:__init__ arg:self arg:mean arg:stddev arg:seed arguments arg arg arg arg Assign Assign Assign Assign Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "numpy",
    "source_code": "def numpy(self):\n    if not self._is_eager():\n        raise ValueError('RaggedTensor.numpy() is only supported in eager mode.')\n    values = self.values.numpy()\n    splits = self.row_splits.numpy()\n    rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]\n    if not rows:\n        return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)\n    has_variable_length_rows = any((len(row) != len(rows[0]) for row in rows))\n    dtype = np.object_ if has_variable_length_rows else None\n    return np.array(rows, dtype=dtype)",
    "docstring": "Returns a numpy with the values for this . Requires that this was constructed in eager execution mode. Ragged dimensions are encoded using numpy with and , where each element is a single row. #### Examples In the following example, the value returned by contains three numpy objects: one for each row (with and ), and one to combine them (with and ): >>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy() array([array([1, 2, 3]), array([4, 5])], dtype=object) Uniform dimensions are encoded using multidimensional numpy s. In the following example, the value returned by contains a single numpy object, with and : >>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy() array([[1, 2, 3], [4, 5, 6]]) Returns: A numpy .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:numpy arg:self arguments arg If Call Raise Call Assign Call Assign Call Assign Call Call If Return return:yes Call Assign Call Compare Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cpu",
    "source_code": "def cpu(self):\n    if self.device.type != 'cpu':\n        return torch.UntypedStorage(self.size()).copy_(self, False)\n    return self",
    "docstring": "Return a CPU copy of this storage if it's not already on the CPU.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:cpu arg:self arguments arg If Compare Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "from_descriptor",
    "source_code": "@staticmethod\n@abc.abstractmethod\ndef from_descriptor(version: str) -> 'Extension':\n    pass",
    "docstring": "See ExtensionRegistry.from_descriptor_list",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py",
    "ast_data": "FunctionDef name:from_descriptor arg:version arguments arg"
  },
  {
    "library": "django",
    "name": "gettext",
    "source_code": "def gettext(message):\n    global _default\n    eol_message = message.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n    if eol_message:\n        _default = _default or translation(settings.LANGUAGE_CODE)\n        translation_object = getattr(_active, 'value', _default)\n        result = translation_object.gettext(eol_message)\n    else:\n        result = type(message)('')\n    if isinstance(message, SafeData):\n        return mark_safe(result)\n    return result",
    "docstring": "Translate the 'message' string. It uses the current thread to find the translation object to use. If no current translation is activated, the message will be run through the default translation object.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\trans_real.py",
    "ast_data": "FunctionDef name:gettext arg:message arguments arg Assign Call Call If Assign BoolOp Call Assign Call Assign Call Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "copy_origin",
    "source_code": "def copy_origin(from_node, to_node):\n    origin = anno.Basic.ORIGIN.of(from_node, default=None)\n    if origin is None:\n        return\n    if not isinstance(to_node, (list, tuple)):\n        to_node = (to_node,)\n    for node in to_node:\n        for n in gast.walk(node):\n            anno.setanno(n, anno.Basic.ORIGIN, origin)",
    "docstring": "Copies the origin info from a node to another, recursively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py",
    "ast_data": "FunctionDef name:copy_origin arg:from_node arg:to_node arguments arg arg Assign Call If Compare Return return:no If Call Assign For For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_mul_flops",
    "source_code": "@ops.RegisterStatistics('Mul', 'flops')\ndef _mul_flops(graph, node):\n    return _binary_per_element_op_flops(graph, node)",
    "docstring": "Compute flops for Mul operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_mul_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, *, devices: Optional[list[Union[int, str, torch.device]]]=None):\n    if devices is None:\n        devices = []\n    super().__init__([torch.device(d) for d in devices])",
    "docstring": "Create an empty unset ``, optional): the set of devices on which tensors contained in this future's value are allowed to reside and on which callbacks are allowed to operate.",
    "type": "method",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg If Compare Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_num_bytes",
    "source_code": "def get_num_bytes(*args: torch.Tensor, num_in_out_args: int=0) -> int:\n    return sum((arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) for i, arg in enumerate(args) if isinstance(arg, torch.Tensor)))",
    "docstring": "Return the total number of bytes the arguments of tensor type takes. For in/out args, tensor sizes are counted twice: once for reading and once for writing. The first num_in_out_args arguments are in out tensors.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py",
    "ast_data": "FunctionDef name:get_num_bytes arguments arg arg Return return:yes Call Call Call Call Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_init",
    "source_code": "def _init(self):\n    if True:\n        trans = self._set_transform()\n        self.span = trans.inverted().transform_bbox(self.axes.bbox).width\n        if self.width is None:\n            sn = np.clip(math.sqrt(self.N), 8, 25)\n            self.width = 0.06 * self.span / sn\n        if self._dpi_at_last_init != self.axes.get_figure(root=True).dpi and self.scale is None:\n            self._make_verts(self.XY, self.U, self.V, self.angles)\n        self._dpi_at_last_init = self.axes.get_figure(root=True).dpi",
    "docstring": "Initialization delayed until first draw; allow time for axes setup.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\quiver.py",
    "ast_data": "FunctionDef name:_init arg:self arguments arg If Assign Call Assign Call Call If Compare Assign Call Call Assign If BoolOp Compare Call Compare Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "set_proj_type",
    "source_code": "def set_proj_type(self, proj_type, focal_length=None):\n    _api.check_in_list(['persp', 'ortho'], proj_type=proj_type)\n    if proj_type == 'persp':\n        if focal_length is None:\n            focal_length = 1\n        elif focal_length <= 0:\n            raise ValueError(f'focal_length = {focal_length} must be greater than 0')\n        self._focal_length = focal_length\n    else:\n        if focal_length not in (None, np.inf):\n            raise ValueError(f'focal_length = {focal_length} must be None for proj_type = {proj_type}')\n        self._focal_length = np.inf",
    "docstring": "Set the projection type. Parameters ---------- proj_type : {'persp', 'ortho'} The projection type. focal_length : float, default: None For a projection type of 'persp', the focal length of the virtual camera. Must be > 0. If None, defaults to 1. The focal length can be computed from a desired Field Of View via the equation: focal_length = 1/tan(FOV/2)",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:set_proj_type arg:self arg:proj_type arg:focal_length arguments arg arg arg Call If Compare If Compare Assign If Compare Raise Call Assign If Compare Raise Call Assign"
  },
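A minimal sketch, assuming matplotlib with the mplot3d toolkit: switching a 3D axes between orthographic and perspective projection; `focal_length` is only meaningful for 'persp'.

```python
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_proj_type("ortho")                    # orthographic: focal_length must stay None/inf
ax.set_proj_type("persp", focal_length=0.2)  # wide-angle perspective (FOV ~157 degrees)
plt.close(fig)
```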
  {
    "library": "pytorch",
    "name": "setup_grid_as_args",
    "source_code": "@staticmethod\ndef setup_grid_as_args() -> dict[str, Any]:\n    return {'grid_type': FixedGrid.__name__, 'fixed_grid': ['_grid_0', '_grid_1', '_grid_2'], 'extra_launcher_args': ['_grid_0', '_grid_1', '_grid_2']}",
    "docstring": "Inductor meta so the launcher takes three extra grid arguments",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py",
    "ast_data": "FunctionDef name:setup_grid_as_args arguments Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "make_functional",
    "source_code": "def make_functional(model: nn.Module, disable_autograd_tracking: bool=False) -> tuple[FunctionalModule, tuple[Tensor, ...]]:\n    buffers = list(model.buffers())\n    if len(buffers) > 0:\n        raise RuntimeError('make_functional(model): `model` has buffers. Please use make_functional_with_buffers(model) instead.')\n    return FunctionalModule._create_from(model, disable_autograd_tracking=disable_autograd_tracking)",
    "docstring": "make_functional(model, disable_autograd_tracking=False) -> func, params Given a `make_functionalmake_functional_with_buffers` to avoid unnecessarily tracking history with PyTorch autograd.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:make_functional arg:model arg:disable_autograd_tracking arguments arg arg Assign Call Call If Compare Call Raise Call Return return:yes Call"
  },
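A minimal sketch for a buffer-free module; note this functorch API is deprecated in recent PyTorch releases in favor of ``torch.func.functional_call``, so treat the import path as version-dependent.

```python
import torch
from torch._functorch.make_functional import make_functional

model = torch.nn.Linear(3, 2)
func, params = make_functional(model)  # params extracted, func is stateless
x = torch.randn(4, 3)
print(func(params, x).shape)           # torch.Size([4, 2]), same as model(x)
```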
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X, y=None):\n    check_is_fitted(self)\n    X = check_array(X, input_name='X', dtype=FLOAT_DTYPES)\n    X_reconstructed = np.matmul(X, self.x_loadings_.T)\n    X_reconstructed *= self._x_std\n    X_reconstructed += self._x_mean\n    if y is not None:\n        y = check_array(y, input_name='y', dtype=FLOAT_DTYPES)\n        y_reconstructed = np.matmul(y, self.y_loadings_.T)\n        y_reconstructed *= self._y_std\n        y_reconstructed += self._y_mean\n        return (X_reconstructed, y_reconstructed)\n    return X_reconstructed",
    "docstring": "Transform data back to its original space. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where is the number of samples and is the number of pls components. y : array-like of shape (n_samples,) or (n_samples, n_components) New target, where is the number of samples and is the number of pls components. Returns ------- X_original : ndarray of shape (n_samples, n_features) Return the reconstructed data. y_original : ndarray of shape (n_samples, n_targets) Return the reconstructed target. Only returned when is given. Notes ----- This transformation will only be exact if .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arg:y arguments arg arg arg Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes Return return:yes"
  },
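A minimal round-trip sketch, assuming scikit-learn; per the note in the docstring, reconstruction is exact only when n_components equals n_features.

```python
import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 3))
y = X @ np.array([1.0, -2.0, 0.5])

pls = PLSRegression(n_components=3).fit(X, y)     # n_components == n_features
X_back = pls.inverse_transform(pls.transform(X))
print(np.allclose(X, X_back))                     # True: exact reconstruction
```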
  {
    "library": "pytorch",
    "name": "FSDPExtensions",
    "source_code": "class FSDPExtensions(ABC):\n\n    @abstractmethod\n    def pre_flatten_transform(self, tensor: torch.Tensor) -> tuple[torch.Tensor, Optional[Any]]:\n        ...\n\n    @abstractmethod\n    def post_unflatten_transform(self, tensor: torch.Tensor, param_extension: Any) -> torch.Tensor:\n        ...\n\n    @abstractmethod\n    def chunk_tensor(self, tensor: torch.Tensor, rank: int, world_size: int, num_devices_per_node: int, pg: dist.ProcessGroup, device: Optional[torch.device]=None) -> torch.Tensor:\n        ...\n\n    @abstractmethod\n    def chunk_dtensor(self, tensor: torch.Tensor, rank: int, device_mesh: DeviceMesh) -> torch.Tensor:\n        ...\n\n    @abstractmethod\n    def pre_load_state_dict_transform(self, tensor: torch.Tensor) -> tuple[torch.Tensor, list[Shard]]:\n        ...\n\n    @abstractmethod\n    def all_gather_dtensor(self, tensor: DTensor, parent_mesh: Optional[DeviceMesh]) -> torch.Tensor:\n        ...",
    "docstring": "This enables some customizable hooks to enable composability with tensor parallelism. To activate these hooks, use :func: to set a custom :class: that implements the hooks.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py",
    "ast_data": "ClassDef name:FSDPExtensions FunctionDef name:pre_flatten_transform arg:self arg:tensor arguments arg arg FunctionDef name:post_unflatten_transform arg:self arg:tensor arg:param_extension arguments arg arg arg FunctionDef name:chunk_tensor arg:self arg:tensor arg:rank arg:world_size arg:num_devices_per_node arg:pg arg:device arguments arg arg arg arg arg arg arg FunctionDef name:chunk_dtensor arg:self arg:tensor arg:rank arg:device_mesh arguments arg arg arg arg FunctionDef name:pre_load_state_dict_transform arg:self arg:tensor arguments arg arg FunctionDef name:all_gather_dtensor arg:self arg:tensor arg:parent_mesh arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "enable_batch_variable_initialization",
    "source_code": "def enable_batch_variable_initialization():\n    return _EXPERIMENTAL_TPU_BATCH_VARIABLE_INITIALIZATION and context.executing_eagerly() and (not save_context.in_save_context())",
    "docstring": "Whether to batch variable initialization in tf.function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:enable_batch_variable_initialization arguments Return return:yes BoolOp Call Call"
  },
  {
    "library": "pandas",
    "name": "ravel",
    "source_code": "@final\ndef ravel(self, order: str_t='C') -> Self:\n    return self[:]",
    "docstring": "Return a view on self. Parameters ---------- order : {'K', 'A', 'C', 'F'}, default 'C' Specify the memory layout of the view. This parameter is not implemented currently. Returns ------- Index A view on self. See Also -------- numpy.ndarray.ravel : Return a flattened array. Examples -------- >>> s = pd.Series([1, 2, 3], index=[\"a\", \"b\", \"c\"]) >>> s.index.ravel() Index(['a', 'b', 'c'], dtype='object')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:ravel arg:self arg:order arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ValueMutationNew",
    "source_code": "class ValueMutationNew(MutationType):\n\n    def __init__(self) -> None:\n        super().__init__(SourceType.New)\n\n    def __hash__(self):\n        return id(self)\n\n    def __eq__(self, other):\n        return self is other",
    "docstring": "This case of VariableTracker.mutation_type marker indicates 1. Dynamo allows mutation on the value itself (rather than its attributes). 2. The value is created by the bytecode Dynamo is tracing through. For instance, Dynamo could model a newly created list with this marker, indicating that while we need to model mutations to this list, we don't have to emit bytecode for these mutations if the list doesn't escape into the Python world.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "ClassDef name:ValueMutationNew FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare"
  },
  {
    "library": "seaborn",
    "name": "bin_predictor",
    "source_code": "def bin_predictor(self, bins):\n    x = np.asarray(self.x)\n    if np.isscalar(bins):\n        percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n        bins = np.percentile(x, percentiles)\n    else:\n        bins = np.ravel(bins)\n    dist = np.abs(np.subtract.outer(x, bins))\n    x_binned = bins[np.argmin(dist, axis=1)].ravel()\n    return (x_binned, bins)",
    "docstring": "Discretize a predictor by assigning value to closest bin.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\regression.py",
    "ast_data": "FunctionDef name:bin_predictor arg:self arg:bins arguments arg arg Assign Call If Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
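A standalone sketch of the same binning rule using only NumPy: bin centers come from interior percentiles, and each value is snapped to the nearest center.

```python
import numpy as np

x = np.arange(10, dtype=float)
n_bins = 3
percentiles = np.linspace(0, 100, n_bins + 2)[1:-1]  # drop the 0% and 100% endpoints
bins = np.percentile(x, percentiles)
dist = np.abs(np.subtract.outer(x, bins))            # |x_i - bin_j| for all pairs
x_binned = bins[np.argmin(dist, axis=1)]
print(bins)      # interior percentile cut points
print(x_binned)  # each value replaced by its closest bin center
```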
  {
    "library": "django",
    "name": "get_allow_future",
    "source_code": "def get_allow_future(self):\n    return self.allow_future",
    "docstring": "Return if the view should be allowed to display objects from the future.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_allow_future arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_monomial_powers",
    "source_code": "def _monomial_powers(ndim, degree):\n    nmonos = comb(degree + ndim, ndim, exact=True)\n    out = np.zeros((nmonos, ndim), dtype=np.dtype('long'))\n    count = 0\n    for deg in range(degree + 1):\n        for mono in combinations_with_replacement(range(ndim), deg):\n            for var in mono:\n                out[count, var] += 1\n            count += 1\n    return out",
    "docstring": "Return the powers for each monomial in a polynomial. Parameters ---------- ndim : int Number of variables in the polynomial. degree : int Degree of the polynomial. Returns ------- (nmonos, ndim) int ndarray Array where each row contains the powers for each variable in a monomial.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp.py",
    "ast_data": "FunctionDef name:_monomial_powers arg:ndim arg:degree arguments arg arg Assign Call Assign Call Call Assign For Call For Call Call For Return return:yes"
  },
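An illustrative call; the function is private to SciPy's RBF interpolator, so the import path may change between versions. For 2 variables and degree 2 there are comb(4, 2) = 6 monomials {1, x, y, x^2, xy, y^2}.

```python
from scipy.interpolate._rbfinterp import _monomial_powers  # private API

print(_monomial_powers(2, 2))
# [[0 0]   -> 1
#  [1 0]   -> x
#  [0 1]   -> y
#  [2 0]   -> x**2
#  [1 1]   -> x*y
#  [0 2]]  -> y**2
```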
  {
    "library": "tensorflow",
    "name": "register_watched_variable_resolver",
    "source_code": "def register_watched_variable_resolver(resolver):\n    global _variables_override\n    assert _variables_override is default_get_variables\n    _variables_override = resolver",
    "docstring": "Registers the resolver to be used to get the list of variables to watch. Args: resolver: callable, takes a Variable and returns a list of Variables that shall be watched.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:register_watched_variable_resolver arg:resolver arguments arg Compare Assign"
  },
  {
    "library": "numpy",
    "name": "_view_is_safe",
    "source_code": "def _view_is_safe(oldtype, newtype):\n    if oldtype == newtype:\n        return\n    if newtype.hasobject or oldtype.hasobject:\n        raise TypeError('Cannot change data-type for array of references.')\n    return",
    "docstring": "Checks safety of a view involving object arrays, for example when doing:: np.zeros(10, dtype=oldtype).view(newtype) Parameters ---------- oldtype : data-type Data type of original ndarray newtype : data-type Data type of the view Raises ------ TypeError If the new type is incompatible with the old type.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_view_is_safe arg:oldtype arg:newtype arguments arg arg If Compare Return return:no If BoolOp Raise Call Return return:no"
  },
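A public-API illustration of the rule the private helper enforces: re-viewing between non-object dtypes is fine, while object arrays raise TypeError.

```python
import numpy as np

a = np.zeros(4, dtype=np.int32)
print(a.view(np.float32).dtype)  # float32: bit-level reinterpretation is allowed

b = np.empty(4, dtype=object)
try:
    b.view(np.int64)
except TypeError as e:
    print(e)  # views into or out of object arrays are rejected
```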
  {
    "library": "pytorch",
    "name": "get_mesh_from_args",
    "source_code": "def get_mesh_from_args(self, validate: bool=True) -> DeviceMesh:\n    first_arg = self.args_schema[0]\n    if isinstance(first_arg, (DTensorSpec, OpStrategy)):\n        mesh = first_arg.mesh\n    elif isinstance(first_arg, (list, tuple, TupleStrategy)):\n        first_elem = first_arg.childs[0] if isinstance(first_arg, TupleStrategy) else first_arg[0]\n        assert isinstance(first_elem, (DTensorSpec, OpStrategy))\n        mesh = first_elem.mesh\n    else:\n        raise ValueError(f'Cannot find device mesh from args for op : {self.op}.')\n    if validate:\n        for arg in self.args_schema[1:]:\n            if isinstance(arg, (DTensorSpec, OpStrategy)) and arg.mesh != mesh:\n                raise RuntimeError(f'DTensor does not support cross-mesh operation on {self.op}! Got meshes: {mesh} {arg.mesh}. Please make sure all the arguments have the same DeviceMesh.')\n    return mesh",
    "docstring": "This util can be used to get a mesh from the OpSchema that contains multiple DTensors as arguments. When is True, it will try to validate that all the arguments have the same mesh to avoid unexpected cross mesh errors. NOTE: this util currently does not handle TupleStrategy when , this is because for TupleStrategy there could be different types of checks, i.e.: - for stack and cat like op, we need to check within a TupleStrategy is every input is on the same mesh - for foreach like ops we need to check \"zipped\" inputs are on the same mesh for each index.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "FunctionDef name:get_mesh_from_args arg:self arg:validate arguments arg arg Assign If Call Assign If Call Assign Call Call Assign Raise Call If For If BoolOp Call Compare Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_standard_rvs",
    "source_code": "def _standard_rvs(self, n, shape, dim, df, random_state):\n    n_tril = dim * (dim - 1) // 2\n    covariances = random_state.normal(size=n * n_tril).reshape(shape + (n_tril,))\n    variances = np.r_[[random_state.chisquare(df - (i + 1) + 1, size=n) ** 0.5 for i in range(dim)]].reshape((dim,) + shape[::-1]).T\n    A = np.zeros(shape + (dim, dim))\n    size_idx = tuple([slice(None, None, None)] * len(shape))\n    tril_idx = np.tril_indices(dim, k=-1)\n    A[size_idx + tril_idx] = covariances\n    diag_idx = np.diag_indices(dim)\n    A[size_idx + diag_idx] = variances\n    return A",
    "docstring": "Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_standard_rvs arg:self arg:n arg:shape arg:dim arg:df arg:random_state arguments arg arg arg arg arg arg Assign Assign Call Call Assign Call Call Call Assign Call Assign Call Call Call Assign Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_wx_font",
    "source_code": "def get_wx_font(self, s, prop):\n    _log.debug('%s - get_wx_font()', type(self))\n    key = hash(prop)\n    font = self.fontd.get(key)\n    if font is not None:\n        return font\n    size = self.points_to_pixels(prop.get_size_in_points())\n    self.fontd[key] = font = wx.Font(pointSize=round(size), family=self.fontnames.get(prop.get_name(), wx.ROMAN), style=self.fontangles[prop.get_style()], weight=self.fontweights[prop.get_weight()])\n    return font",
    "docstring": "Return a wx font. Cache font instances for efficiency.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:get_wx_font arg:self arg:s arg:prop arguments arg arg arg Call Call Assign Call Assign Call If Compare Return return:yes Assign Call Call Assign Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "all_forward_schemas",
    "source_code": "def all_forward_schemas() -> dict[str, _TorchSchema]:\n    torch_schemas = [_TorchSchema(s) for s in _C._jit_get_all_schemas()]\n    return {schema.name: schema for schema in torch_schemas if not schema.is_backward()}",
    "docstring": "Returns schemas for all TorchScript forward ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_onnx_supported_ops.py",
    "ast_data": "FunctionDef name:all_forward_schemas arguments Assign Call Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "register",
    "source_code": "def register(self, point, **kwargs):\n\n    def decorator(func):\n        attr_name = kwargs.get('name', func.__name__)\n        tool = Tool(point, func, **kwargs)\n        setattr(self, attr_name, tool)\n        return func\n    return decorator",
    "docstring": "Register a hook point handler in the toolbox. Return a decorator which registers the function at the given hook point.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:register arg:self arg:point arguments arg arg arg FunctionDef name:decorator arg:func arguments arg Assign Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_decomps",
    "source_code": "def get_decomps(self, target: TorchOp) -> list[OnnxDecompMeta]:\n    target_or_name: str | TorchOp\n    if isinstance(target, torch._ops.OpOverload):\n        target_or_name = target.name()\n    else:\n        target_or_name = target\n    decomps = self.functions.get(target_or_name, [])\n    return sorted(decomps, key=lambda x: x.is_custom, reverse=True)",
    "docstring": "Returns a list of OnnxDecompMeta for the given op: torch.ops.... The list is ordered by the time of registration. The custom operators should come first in the list. Args: target: The PyTorch node callable target. Returns: A list of OnnxDecompMeta corresponding to the given name, or None if the name is not in the registry.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_registration.py",
    "ast_data": "FunctionDef name:get_decomps arg:self arg:target arguments arg arg If Call Assign Call Assign Assign Call Return return:yes Call arguments arg"
  },
  {
    "library": "pytorch",
    "name": "parameters",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef parameters(self) -> Iterator[torch.nn.Parameter]:\n    for _, param in self.named_parameters():\n        yield param",
    "docstring": "Returns an iterator over original module's parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\exported_program.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg For Call Call"
  },
  {
    "library": "pandas",
    "name": "reset",
    "source_code": "def reset(self) -> None:\n    self._mean.reset()",
    "docstring": "Reset the state captured by calls.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\ewm.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_flow_start",
    "source_code": "def emit_flow_start(self, name: str, timestamp: int, pid: int, tid: int, flow_id: int) -> None:\n    event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)\n    event['id'] = flow_id\n    self._events.append(event)",
    "docstring": "Adds a flow start event to the trace. When matched with a flow end event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_flow_start arg:self arg:name arg:timestamp arg:pid arg:tid arg:flow_id arguments arg arg arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "ClientMetadataClaims",
    "source_code": "class ClientMetadataClaims(BaseClaims):\n    REGISTERED_CLAIMS = ['require_signed_request_object']\n\n    def validate(self):\n        self._validate_essential_claims()\n        self.validate_require_signed_request_object()\n\n    def validate_require_signed_request_object(self):\n        self.setdefault('require_signed_request_object', False)\n        if not isinstance(self['require_signed_request_object'], bool):\n            raise InvalidClaimError('require_signed_request_object')\n        self._validate_claim_value('require_signed_request_object')",
    "docstring": "Additional client metadata can be used with :ref: and :ref: endpoints. This can be used with:: server.register_endpoint( ClientRegistrationEndpoint( claims_classes=[ rfc7591.ClientMetadataClaims, rfc9101.ClientMetadataClaims, ] ) ) server.register_endpoint( ClientRegistrationEndpoint( claims_classes=[ rfc7591.ClientMetadataClaims, rfc9101.ClientMetadataClaims, ] ) )",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc9101\\registration.py",
    "ast_data": "ClassDef name:ClientMetadataClaims Assign FunctionDef name:validate arg:self arguments arg Call Call FunctionDef name:validate_require_signed_request_object arg:self arguments arg Call If Call Raise Call Call"
  },
  {
    "library": "sphinx",
    "name": "add_role_to_domain",
    "source_code": "def add_role_to_domain(self, domain: str, name: str, role: RoleFunction | XRefRole, override: bool=False) -> None:\n    self.registry.add_role_to_domain(domain, name, role, override=override)",
    "docstring": "Register a Docutils role in a domain. Like :meth:, but the role is added to the domain named *domain*. :param domain: The name of the target domain :param name: The name of the role :param role: The role function :param override: If false, do not install it if another role is already installed as the same name If true, unconditionally install the role. .. versionadded:: 1.0 .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_role_to_domain arg:self arg:domain arg:name arg:role arg:override arguments arg arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "restore",
    "source_code": "def restore(self, restored_tensors, restored_shapes):\n    tensor, = restored_tensors\n    return self._distributed_variable._policy.get_restore_ops(self._distributed_variable, tensor)",
    "docstring": "Restore the same value into all variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "unscale_",
    "source_code": "def unscale_(self, optimizer: torch.optim.Optimizer) -> None:\n    if not self._enabled:\n        return\n    self._check_scale_growth_tracker('unscale_')\n    optimizer_state = self._per_optimizer_states[id(optimizer)]\n    if optimizer_state['stage'] is OptState.UNSCALED:\n        raise RuntimeError('unscale_() has already been called on this optimizer since the last update().')\n    elif optimizer_state['stage'] is OptState.STEPPED:\n        raise RuntimeError('unscale_() is being called after step().')\n    assert self._scale is not None\n    inv_scale = self._scale.double().reciprocal().float() if self._scale.device != torch.device('mps:0') else self._scale.reciprocal()\n    found_inf = torch.full((), 0.0, dtype=torch.float32, device=self._scale.device)\n    optimizer_state['found_inf_per_device'] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)\n    optimizer_state['stage'] = OptState.UNSCALED",
    "docstring": "Divides (\"unscales\") the optimizer's gradient tensors by the scale factor. :meth: is optional, serving cases where you need to :ref: between the backward pass(es) and :meth:. If :meth: is not called explicitly, gradients will be unscaled automatically during :meth:. Simple example, using :meth: to enable clipping of unscaled gradients:: ... scaler.scale(loss).backward() scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) scaler.step(optimizer) scaler.update() Args: optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. .. note:: :meth: does not incur a CPU-GPU sync. .. warning:: :meth: should only be called once per optimizer per :meth: call, and only after all gradients for that optimizer's assigned parameters have been accumulated. Calling :meth: twice for a given optimizer between each :meth: triggers a RuntimeError. .. warning:: :meth: may unscale sparse gradients out of place, replacing the `` attribute.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:unscale_ arg:self arg:optimizer arguments arg arg If Return return:no Call Assign Call If Compare Raise Call If Compare Raise Call Compare Assign Compare Call Call Call Call Call Assign Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "fill_uninitialized_memory",
    "source_code": "@property\ndef fill_uninitialized_memory(self):\n    return torch._C._get_deterministic_fill_uninitialized_memory()",
    "docstring": "Whether to fill uninitialized memory with a known value when :meth: is set to ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\deterministic.py",
    "ast_data": "FunctionDef name:fill_uninitialized_memory arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> tuple[int, int]:\n    return (len(self.index), len(self.columns))",
    "docstring": "Return a tuple representing the dimensionality of the DataFrame. Unlike the method, which only returns the number of rows, provides both row and column counts, making it a more informative method for understanding dataset size. See Also -------- numpy.ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4], \"col3\": [5, 6]}) >>> df.shape (2, 3)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "export_chrome_trace",
    "source_code": "def export_chrome_trace(self, path: str):\n    assert self.profiler\n    if path.endswith('.gz'):\n        fp = tempfile.NamedTemporaryFile('w+b', suffix='.json', delete=False)\n        fp.close()\n        retvalue = self.profiler.export_chrome_trace(fp.name)\n        with open(fp.name, 'rb') as fin:\n            with gzip.open(path, 'wb') as fout:\n                fout.writelines(fin)\n        os.remove(fp.name)\n        return retvalue\n    else:\n        return self.profiler.export_chrome_trace(path)",
    "docstring": "Exports the collected trace in Chrome JSON format. If kineto is enabled, only last cycle in schedule is exported.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:export_chrome_trace arg:self arg:path arguments arg arg If Call Assign Call Call Assign Call With Call With Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extract_method_name",
    "source_code": "def extract_method_name(line: str) -> str:\n    if '(\"' in line:\n        start_token, end_token = ('(\"', '\")')\n    elif \"('\" in line:\n        start_token, end_token = (\"('\", \"')\")\n    else:\n        raise RuntimeError(f'Unable to find appropriate method name within line:\\n{line}')\n    start, end = (line.find(start_token) + len(start_token), line.find(end_token))\n    return line[start:end]",
    "docstring": "Extract method name from decorator in the form of \"@functional_datapipe({method_name})\".",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py",
    "ast_data": "FunctionDef name:extract_method_name arg:line arguments arg If Compare Assign If Compare Assign Raise Call Assign Call Call Call Return return:yes"
  },
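Illustrative calls; the helper lives in the datapipes ``.pyi`` generator, so the import is an internal path that may move between releases.

```python
from torch.utils.data.datapipes.gen_pyi import extract_method_name

print(extract_method_name('@functional_datapipe("map")'))     # map
print(extract_method_name("@functional_datapipe('filter')"))  # filter
```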
  {
    "library": "tensorflow",
    "name": "_call_partitioner",
    "source_code": "def _call_partitioner(partitioner, shape, dtype):\n    if not shape.is_fully_defined():\n        raise ValueError('Shape of a new partitioned variable must be fully defined, but instead was %s.' % (shape,))\n    if shape.ndims < 1:\n        raise ValueError('A partitioned Variable must have rank at least 1, shape: %s' % shape)\n    slicing = partitioner(shape=shape, dtype=dtype)\n    if not isinstance(slicing, collections_abc.Sequence):\n        raise ValueError('Partitioner must return a sequence, but saw: %s' % slicing)\n    if len(slicing) != shape.ndims:\n        raise ValueError(\"Partitioner returned a partition list that does not match the Variable's rank: %s vs. %s\" % (slicing, shape))\n    if any((p < 1 for p in slicing)):\n        raise ValueError('Partitioner returned zero partitions for some axes: %s' % slicing)\n    if sum((p > 1 for p in slicing)) > 1:\n        raise ValueError('Can only slice a variable along one dimension: shape: %s, partitioning: %s' % (shape, slicing))\n    return slicing",
    "docstring": "Call partitioner validating its inputs/output. Args: partitioner: a function mapping shape and dtype to a list of partitions. shape: shape of the to partition, must have at least two dimensions. dtype: dtype of the elements in the . Returns: A list with elements >=1 and exactly one >1. The index of that element corresponds to the partitioning axis.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_call_partitioner arg:partitioner arg:shape arg:dtype arguments arg arg arg If Call Raise Call If Compare Raise Call Assign Call If Call Raise Call If Compare Call Raise Call If Call Compare Raise Call If Compare Call Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_test_db_name",
    "source_code": "def _get_test_db_name(self):\n    if self.connection.settings_dict['TEST']['NAME']:\n        return self.connection.settings_dict['TEST']['NAME']\n    return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']",
    "docstring": "Internal implementation - return the name of the test DB that will be created. Only useful when called from create_test_db() and _create_test_db() and when no external munging is done with the 'NAME' settings.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:_get_test_db_name arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "make_refnode",
    "source_code": "def make_refnode(builder: Builder, fromdocname: str, todocname: str, targetid: str | None, child: Node | list[Node], title: str | None=None) -> nodes.reference:\n    node = nodes.reference('', '', internal=True)\n    if fromdocname == todocname and targetid:\n        node['refid'] = targetid\n    elif targetid:\n        node['refuri'] = builder.get_relative_uri(fromdocname, todocname) + '#' + targetid\n    else:\n        node['refuri'] = builder.get_relative_uri(fromdocname, todocname)\n    if title:\n        node['reftitle'] = title\n    node += child\n    return node",
    "docstring": "Shortcut to create a reference node.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\nodes.py",
    "ast_data": "FunctionDef name:make_refnode arg:builder arg:fromdocname arg:todocname arg:targetid arg:child arg:title arguments arg arg arg arg arg arg Assign Call If BoolOp Compare Assign If Assign Call Assign Call If Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_structured_grad_output",
    "source_code": "def _get_structured_grad_output(outputs, grads, body_grad_graph):\n    result = []\n    outputs_idx = 3\n    structured_outputs_idx = 3\n    for g in grads:\n        if g is None:\n            result.append(None)\n            continue\n        output = body_grad_graph.structured_outputs[structured_outputs_idx]\n        structured_outputs_idx += 1\n        if isinstance(output, indexed_slices.IndexedSlices):\n            result.append(indexed_slices.IndexedSlices(values=outputs[outputs_idx], indices=outputs[outputs_idx + 1], dense_shape=outputs[outputs_idx + 2]))\n            outputs_idx += 3\n        else:\n            assert isinstance(output, tensor_lib.Tensor)\n            result.append(outputs[outputs_idx])\n            outputs_idx += 1\n    return result",
    "docstring": "Returns the values that should be returned from the while grad function. Args: outputs: the raw Tensor outputs of the grad While op. grads: the input gradients to the gradient function. body_grad_graph: _WhileBodyGradFuncGraph. Returns: A list of gradient values. May include Nones.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_get_structured_grad_output arg:outputs arg:grads arg:body_grad_graph arguments arg arg arg Assign Assign Assign For If Compare Call Assign If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "debug_nop",
    "source_code": "@make_boxed_compiler\ndef debug_nop(fx_g: fx.GraphModule, _) -> Callable:\n    return DebugInterpreter(fx_g).run",
    "docstring": "Returns a (slow) interpreter over the FX graph module that also checks various debugging properties (e.g., that tracing strides matched real strides.)",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\compilers.py",
    "ast_data": "FunctionDef name:debug_nop arg:fx_g arg:_ arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "new_bounds_to_old",
    "source_code": "def new_bounds_to_old(lb, ub, n):\n    lb = np.broadcast_to(lb, n)\n    ub = np.broadcast_to(ub, n)\n    lb = [float(x) if x > -np.inf else None for x in lb]\n    ub = [float(x) if x < np.inf else None for x in ub]\n    return list(zip(lb, ub))",
    "docstring": "Convert the new bounds representation to the old one. The new representation is a tuple (lb, ub) and the old one is a list containing n tuples, ith containing lower and upper bound on a ith variable. If any of the entries in lb/ub are -np.inf/np.inf they are replaced by None.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_constraints.py",
    "ast_data": "FunctionDef name:new_bounds_to_old arg:lb arg:ub arg:n arguments arg arg arg Assign Call Assign Call Assign Compare Call Assign Compare Call Return return:yes Call Call"
  },
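A worked call using the function body above, shown standalone so it runs without SciPy internals: infinite bounds become None in the old per-variable representation.

```python
import numpy as np

def new_bounds_to_old(lb, ub, n):
    lb = np.broadcast_to(lb, n)
    ub = np.broadcast_to(ub, n)
    lb = [float(x) if x > -np.inf else None for x in lb]
    ub = [float(x) if x < np.inf else None for x in ub]
    return list(zip(lb, ub))

print(new_bounds_to_old(0, [1, np.inf, 2], 3))
# [(0.0, 1.0), (0.0, None), (0.0, 2.0)]
```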
  {
    "library": "scikit-learn",
    "name": "_fit_best_piecewise",
    "source_code": "def _fit_best_piecewise(self, vectors, n_best, n_clusters):\n\n    def make_piecewise(v):\n        centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)\n        return centroid[labels].ravel()\n    piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)\n    dists = np.apply_along_axis(norm, axis=1, arr=vectors - piecewise_vectors)\n    result = vectors[np.argsort(dists)[:n_best]]\n    return result",
    "docstring": "Find the `` vectors that are best approximated by piecewise constant vectors. The piecewise vectors are found by k-means; the best is chosen according to Euclidean distance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_fit_best_piecewise arg:self arg:vectors arg:n_best arg:n_clusters arguments arg arg arg arg FunctionDef name:make_piecewise arg:v arguments arg Assign Call Call Return return:yes Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "default_proc",
    "source_code": "def default_proc(self):\n    pass",
    "docstring": "Process unknown data as a fallback. Called if a more-specific processor is not found for the ``.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:default_proc arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, name):\n    if not isinstance(name, str):\n        raise KeyError('Only string keys are supported')\n    return self.named_transformers[name]",
    "docstring": "Return transformer with name.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:name arguments arg arg If Call Raise Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "transform",
    "source_code": "@classmethod\ndef transform(cls, input: Boxes, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Boxes:\n    if extra_args is None:\n        extra_args = {}\n    _input = input.clone()\n    if isinstance(module, (K.GeometricAugmentationBase2D,)):\n        _input = module.transform_boxes(_input, cls.get_instance_module_param(param), module.flags, transform=module.transform_matrix, **extra_args)\n    elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n        raise NotImplementedError('The support for 3d box operations are not yet supported. You are welcome to file a PR in our repo.')\n    elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n        _input = module.transform_boxes(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n    elif isinstance(module, K.container.ImageSequentialBase):\n        _input = module.transform_boxes(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n    elif isinstance(module, (K.auto.operations.OperationBase,)):\n        return BoxSequentialOps.transform(input, module=module.op, param=param, extra_args=extra_args)\n    return _input",
    "docstring": "Apply a transformation with respect to the parameters. Args: input: the input tensor, (B, N, 4, 2) or (B, 4, 2). module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign Assign Call If Call Assign Call Call If Call Raise Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_assets_dir",
    "source_code": "def get_assets_dir(export_dir):\n    return file_io.join(compat.as_text(export_dir), compat.as_text(constants.ASSETS_DIRECTORY))",
    "docstring": "Return path to asset directory in the SavedModel.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py",
    "ast_data": "FunctionDef name:get_assets_dir arg:export_dir arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "slice",
    "source_code": "@tf_export('slice')\n@dispatch.add_dispatch_support\ndef slice(input_, begin, size, name=None):\n    return gen_array_ops._slice(input_, begin, size, name=name)",
    "docstring": "Extracts a slice from a tensor. See also . This operation extracts a slice of size from a tensor starting at the location specified by . The slice is represented as a tensor shape, where is the number of elements of the 'i'th dimension of that you want to slice. The starting location () for the slice is represented as an offset in each dimension of . In other words, is the offset into the i'th dimension of that you want to slice from. Note that is typically a more pythonic way to perform slices, as it allows you to write instead of . is zero-based; is one-based. If is -1, all remaining elements in dimension i are included in the slice. In other words, this is equivalent to setting: This operation requires that: For example: Args: input_: A . begin: An or . size: An or . name: A name for the operation (optional). Returns: A the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:slice arg:input_ arg:begin arg:size arg:name arguments arg arg arg arg Return return:yes Call Call"
  },
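A minimal sketch, assuming TensorFlow: ``begin`` gives per-dimension offsets, ``size`` gives per-dimension lengths, and -1 means "all remaining elements".

```python
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(tf.slice(t, [1, 0], [1, 3]))   # [[4 5 6]]: row 1, all columns
print(tf.slice(t, [0, 1], [-1, 2]))  # [[2 3] [5 6] [8 9]]: every row, columns 1..2
```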
  {
    "library": "pytorch",
    "name": "create_load_const",
    "source_code": "def create_load_const(val, checked=True) -> Instruction:\n    if checked:\n        assert is_safe_constant(val), f'unsafe constant {val}'\n    return create_instruction('LOAD_CONST', argval=val)",
    "docstring": "In general we should only create for immutable objects, but sometimes it's convenient _and safe_ for Dynamo create for mutable objects. In such cases, use .",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:create_load_const arg:val arg:checked arguments arg arg If Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "format_live_refs",
    "source_code": "def format_live_refs(ignore: Any=NoneType) -> str:\n    s = 'Live References\\n\\n'\n    now = time()\n    for cls, wdict in sorted(live_refs.items(), key=lambda x: x[0].__name__):\n        if not wdict:\n            continue\n        if issubclass(cls, ignore):\n            continue\n        oldest = min(wdict.values())\n        s += f'{cls.__name__:<30} {len(wdict):6}   oldest: {int(now - oldest)}s ago\\n'\n    return s",
    "docstring": "Return a tabular representation of tracked objects",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\trackref.py",
    "ast_data": "FunctionDef name:format_live_refs arg:ignore arguments arg Assign Assign Call For Call Call arguments arg If If Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "JitTypeTraceStoreLogger",
    "source_code": "class JitTypeTraceStoreLogger(CallTraceStoreLogger):\n\n    def __init__(self, store: CallTraceStore):\n        super().__init__(store)\n\n    def log(self, trace: CallTrace) -> None:\n        self.traces.append(trace)",
    "docstring": "A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore.",
    "type": "class",
    "file_path": "pytorch\\torch\\jit\\_monkeytype_config.py",
    "ast_data": "ClassDef name:JitTypeTraceStoreLogger FunctionDef name:__init__ arg:self arg:store arguments arg arg Call Call FunctionDef name:log arg:self arg:trace arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, device=''):\n    self._resource_handle_value = None\n    self._resource_device = device\n    self._self_destruction_context = context.eager_mode if context.executing_eagerly() else ops.get_default_graph().as_default",
    "docstring": "Initialize the . Args: device: A string indicating a required placement for this resource, e.g. \"CPU\" if this resource must be created on a CPU device. A blank device allows the user to place resource creation, so generally this should be blank unless the resource only makes sense on one device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:device arguments arg arg Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_deserialize_keras_object",
    "source_code": "def _deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'):\n    if identifier is None:\n        return None\n    if isinstance(identifier, dict):\n        config = identifier\n        cls, cls_config = _class_and_config_for_serialized_keras_object(config, module_objects, custom_objects, printable_module_name)\n        if hasattr(cls, 'from_config'):\n            arg_spec = tf_inspect.getfullargspec(cls.from_config)\n            custom_objects = custom_objects or {}\n            if 'custom_objects' in arg_spec.args:\n                return cls.from_config(cls_config, custom_objects=dict(list(custom_objects.items())))\n            return cls.from_config(cls_config)\n        else:\n            custom_objects = custom_objects or {}\n            return cls(**cls_config)\n    elif isinstance(identifier, six.string_types):\n        object_name = identifier\n        if custom_objects and object_name in custom_objects:\n            obj = custom_objects.get(object_name)\n        else:\n            obj = module_objects.get(object_name)\n            if obj is None:\n                raise ValueError('Unknown ' + printable_module_name + ': ' + object_name)\n        if tf_inspect.isclass(obj):\n            return obj()\n        return obj\n    elif tf_inspect.isfunction(identifier):\n        return identifier\n    else:\n        raise ValueError('Could not interpret serialized %s: %s' % (printable_module_name, identifier))",
    "docstring": "Turns the serialized form of a Keras object back into an actual object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py",
    "ast_data": "FunctionDef name:_deserialize_keras_object arg:identifier arg:module_objects arg:custom_objects arg:printable_module_name arguments arg arg arg arg If Compare Return return:no If Call Assign Assign Call If Call Assign Call Assign BoolOp If Compare Return return:yes Call Call Call Call Return return:yes Call Assign BoolOp Return return:yes Call If Call Assign If BoolOp Compare Assign Call Assign Call If Compare Raise Call If Call Return return:yes Call Return return:yes If Call Return return:yes Raise Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, inp: Union[Tensor, UntypedStorage], extra_ref_check: Optional[Callable[[], bool]]=None) -> None:\n    if isinstance(inp, Tensor):\n        stor = inp.untyped_storage()\n    else:\n        assert isinstance(inp, UntypedStorage)\n        stor = inp\n    self.ref = StorageWeakRef(stor)\n    self._data_ptr = stor.data_ptr()\n    self.extra_ref_check = extra_ref_check",
    "docstring": "extra_ref_check is an additional check we need to run to check if the weak ref has expired. in checking storage use count we assume extra_ref_check will hold an additional reference to the storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:inp arg:extra_ref_check arguments arg arg arg If Call Assign Call Call Assign Assign Call Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_label",
    "source_code": "def set_label(self, s):\n    raise RuntimeError('A legend label cannot be assigned to an Axis. Did you mean to set the axis label via set_label_text()?')",
    "docstring": "Assigning legend labels is not supported. Raises RuntimeError.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_label arg:self arg:s arguments arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "exponpow_gen",
    "source_code": "class exponpow_gen(rv_continuous):\n\n    def _shape_info(self):\n        return [_ShapeInfo('b', False, (0, np.inf), (False, False))]\n\n    def _pdf(self, x, b):\n        return np.exp(self._logpdf(x, b))\n\n    def _logpdf(self, x, b):\n        xb = x ** b\n        f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)\n        return f\n\n    def _cdf(self, x, b):\n        return -sc.expm1(-sc.expm1(x ** b))\n\n    def _sf(self, x, b):\n        return np.exp(-sc.expm1(x ** b))\n\n    def _isf(self, x, b):\n        return sc.log1p(-np.log(x)) ** (1.0 / b)\n\n    def _ppf(self, q, b):\n        return pow(sc.log1p(-sc.log1p(-q)), 1.0 / b)",
    "docstring": "An exponential power continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x, b) = b x^{b-1} \\exp(1 + x^b - \\exp(x^b)) for :math:, :math:. Note that this is a different distribution from the exponential power distribution that is also known under the names \"generalized normal\" or \"generalized Gaussian\". takes `b`. %(after_notes)s References ---------- %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:exponpow_gen FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_pdf arg:self arg:x arg:b arguments arg arg arg Return return:yes Call Call FunctionDef name:_logpdf arg:self arg:x arg:b arguments arg arg arg Assign Assign Call Call Call Return return:yes FunctionDef name:_cdf arg:self arg:x arg:b arguments arg arg arg Return return:yes Call Call FunctionDef name:_sf arg:self arg:x arg:b arguments arg arg arg Return return:yes Call Call FunctionDef name:_isf arg:self arg:x arg:b arguments arg arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arg:b arguments arg arg arg Return return:yes Call Call Call"
  },
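A minimal sketch, assuming SciPy: evaluating the distribution for a shape parameter ``b`` and checking that ``ppf`` inverts ``cdf``.

```python
from scipy.stats import exponpow

b = 2.7
print(exponpow.pdf(0.5, b))                   # density at x = 0.5
print(exponpow.ppf(exponpow.cdf(0.5, b), b))  # ~0.5: ppf inverts cdf
```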
  {
    "library": "tensorflow",
    "name": "grad",
    "source_code": "def grad(self, source, flow=None, name=None):\n    if flow is None:\n        flow = self.flow\n    with ops.name_scope(name, 'TensorArrayGrad', [self._handle]):\n        with ops.colocate_with(self._handle):\n            g_handle, unused_flow = gen_data_flow_ops.tensor_array_grad_v3(handle=self._handle, source=source, flow_in=flow, name=name)\n            with ops.control_dependencies([g_handle]):\n                flow = array_ops.identity(flow, name='gradient_flow')\n            g = TensorArray(dtype=self._dtype, handle=g_handle, flow=flow, infer_shape=self._infer_shape, colocate_with_first_write_call=False)\n            g._implementation._element_shape = self._element_shape\n            return g",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:grad arg:self arg:source arg:flow arg:name arguments arg arg arg arg If Compare Assign With Call With Call Assign Call With Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "disconnect",
    "source_code": "def disconnect(self, cid):\n    self._observers.disconnect(cid)",
    "docstring": "Remove the observer with connection id *cid*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:cid arguments arg arg Call"
  },
  {
    "library": "scipy",
    "name": "butter",
    "source_code": "def butter(N, Wn, btype='low', analog=False, output='ba', fs=None):\n    return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter', fs=fs)",
    "docstring": "Butterworth digital and analog filter design. Design an Nth-order digital or analog Butterworth filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. For 'bandpass' and 'bandstop' filters, the resulting order of the final second-order sections ('sos') matrix is `NfsWnWnfsfsWnfsWnbafreqz`) format): >>> sos = signal.butter(10, 15, 'hp', fs=1000, output='sos') >>> filtered = signal.sosfilt(sos, sig) >>> ax2.plot(t, filtered) >>> ax2.set_title('After 15 Hz high-pass filter') >>> ax2.axis([0, 1, -2, 2]) >>> ax2.set_xlabel('Time [s]') >>> plt.tight_layout() >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:butter arg:N arg:Wn arg:btype arg:analog arg:output arg:fs arguments arg arg arg arg arg arg Return return:yes Call"
  },
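A minimal sketch, assuming SciPy: a 4th-order low-pass design in the recommended 'sos' output form; an Nth-order lowpass yields ceil(N/2) second-order sections.

```python
from scipy import signal

# 4th-order low-pass Butterworth, 100 Hz cutoff, 1 kHz sample rate.
sos = signal.butter(4, 100, btype="low", fs=1000, output="sos")
print(sos.shape)  # (2, 6): two second-order sections
```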
  {
    "library": "numpy",
    "name": "get_shape",
    "source_code": "def get_shape(self):\n    warnings.warn('\"get_shape\" is deprecated. Use \"shape\" instead', DeprecationWarning, stacklevel=2)\n    return self.shape",
    "docstring": "Deprecated getter for the property. .. deprecated:: 1.21",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:get_shape arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "PrivateKeyJWT",
    "source_code": "class PrivateKeyJWT(ClientSecretJWT):\n    name = 'private_key_jwt'\n    alg = 'RS256'\n\n    def sign(self, auth, token_endpoint):\n        return private_key_jwt_sign(auth.client_secret, client_id=auth.client_id, token_endpoint=token_endpoint, claims=self.claims, header=self.headers, alg=self.alg)",
    "docstring": "Authentication method for OAuth 2.0 Client. This authentication method is called `` value, default is RS256",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\auth.py",
    "ast_data": "ClassDef name:PrivateKeyJWT Assign Assign FunctionDef name:sign arg:self arg:auth arg:token_endpoint arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "count_contains",
    "source_code": "def count_contains(self, vertices):\n    if len(vertices) == 0:\n        return 0\n    vertices = np.asarray(vertices)\n    with np.errstate(invalid='ignore'):\n        return ((self.min < vertices) & (vertices < self.max)).all(axis=1).sum()",
    "docstring": "Count the number of vertices contained in the . Any vertices with a non-finite x or y value are ignored. Parameters ---------- vertices : (N, 2) array",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:count_contains arg:self arg:vertices arguments arg arg If Compare Call Return return:yes Assign Call With Call Return return:yes Call Call Compare Compare"
  },
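A minimal sketch, assuming matplotlib: counting the points strictly inside a unit box; the non-finite vertex is ignored.

```python
import numpy as np
from matplotlib.transforms import Bbox

bbox = Bbox.from_extents(0, 0, 1, 1)
pts = np.array([[0.5, 0.5], [2.0, 0.5], [np.nan, 0.3]])
print(bbox.count_contains(pts))  # 1: only the first point is inside
```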
  {
    "library": "pytorch",
    "name": "save_config_portable",
    "source_code": "def save_config_portable(self) -> dict[str, Any]:\n    prefixes = ['_']\n    prefixes.extend(getattr(self, '_cache_config_ignore_prefix', []))\n    return self._get_dict(ignored_prefixes=prefixes)",
    "docstring": "Convert config to portable format",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_config_module.py",
    "ast_data": "FunctionDef name:save_config_portable arg:self arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_concat_cast_helper",
    "source_code": "def _concat_cast_helper(tensors, out=None, dtype=None, casting='same_kind'):\n    if out is not None or dtype is not None:\n        out_dtype = out.dtype.torch_dtype if dtype is None else dtype\n    else:\n        out_dtype = _dtypes_impl.result_type_impl(*tensors)\n    tensors = _util.typecast_tensors(tensors, out_dtype, casting)\n    return tensors",
    "docstring": "Figure out dtypes, cast if necessary.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_funcs_impl.py",
    "ast_data": "FunctionDef name:_concat_cast_helper arg:tensors arg:out arg:dtype arg:casting arguments arg arg arg arg If BoolOp Compare Compare Assign Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "rfind",
    "source_code": "@set_module('numpy.strings')\ndef rfind(a, sub, start=0, end=None):\n    end = end if end is not None else MAX\n    return _rfind_ufunc(a, sub, start, end)",
    "docstring": "For each element, return the highest index in the string where substring `` dtype The substring to search for. start, end : array_like, with any integer dtype The range to look in, interpreted as in slice notation. Returns ------- y : ndarray Output array of ints See Also -------- str.rfind Examples -------- >>> import numpy as np >>> a = np.array([\"Computer Science\"]) >>> np.strings.rfind(a, \"Science\", start=0, end=None) array([9]) >>> np.strings.rfind(a, \"Science\", start=0, end=8) array([-1]) >>> b = np.array([\"Computer Science\", \"Science\"]) >>> np.strings.rfind(b, \"Science\", start=0, end=None) array([9, 0])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:rfind arg:a arg:sub arg:start arg:end arguments arg arg arg arg Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "AttributeSentinel",
    "source_code": "class AttributeSentinel(object):\n\n    def __init__(self, always_propagate=False):\n        self._parents = weakref.WeakSet()\n        self.attributes = collections.defaultdict(MutationSentinel)\n        self.always_propagate = always_propagate\n\n    def __repr__(self):\n        return '{}\\n  {}'.format(super(AttributeSentinel, self).__repr__(), {k: v.in_cached_state for k, v in self.attributes.items()})\n\n    def add_parent(self, node):\n        self._parents.add(node)\n        node.invalidate_all()\n\n    def get(self, key):\n        return self.attributes[key].in_cached_state\n\n    def _set(self, key, value):\n        may_affect_upstream = self.attributes[key].mark_as(value)\n        if may_affect_upstream or self.always_propagate:\n            for node in self._parents:\n                node.invalidate(key)\n\n    def mark_cached(self, key):\n        self._set(key, True)\n\n    def invalidate(self, key):\n        self._set(key, False)\n\n    def invalidate_all(self):\n        for key in self.attributes.keys():\n            self.attributes[key].mark_as(False)\n        for node in self._parents:\n            node.invalidate_all()",
    "docstring": "Container for managing attribute cache state within a Layer. The cache can be invalidated either on an individual basis (for instance when an attribute is mutated) or a layer-wide basis (such as when a new dependency is added).",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\layer_utils.py",
    "ast_data": "ClassDef name:AttributeSentinel FunctionDef name:__init__ arg:self arg:always_propagate arguments arg arg Assign Call Assign Call Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:add_parent arg:self arg:node arguments arg arg Call Call FunctionDef name:get arg:self arg:key arguments arg arg Return return:yes FunctionDef name:_set arg:self arg:key arg:value arguments arg arg arg Assign Call If BoolOp For Call FunctionDef name:mark_cached arg:self arg:key arguments arg arg Call FunctionDef name:invalidate arg:self arg:key arguments arg arg Call FunctionDef name:invalidate_all arg:self arguments arg For Call Call For Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[NumpyExtensionArray]:\n    from pandas.core.arrays import NumpyExtensionArray\n    return NumpyExtensionArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_loc",
    "source_code": "def get_loc(self):\n    return self._loc",
    "docstring": "Return the tick location (data coords) as a scalar.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_loc arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_implements_train_batch_hooks",
    "source_code": "def _implements_train_batch_hooks(self):\n    return not generic_utils.is_default(self.on_batch_begin) or not generic_utils.is_default(self.on_batch_end) or (not generic_utils.is_default(self.on_train_batch_begin)) or (not generic_utils.is_default(self.on_train_batch_end))",
    "docstring": "Determines if this Callback should be called for each train batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_implements_train_batch_hooks arg:self arguments arg Return return:yes BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "eager_mode",
    "source_code": "@tf_export('__internal__.eager_context.eager_mode', v1=[])\ndef eager_mode():\n    return context()._mode(EAGER_MODE)",
    "docstring": "Context-manager to enable eager execution for the current thread.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:eager_mode arguments Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "scatter",
    "source_code": "def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None):\n    tensor = _handle_complex(tensor)\n    if out is None:\n        devices = [_get_device_index(d) for d in devices]\n        return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))\n    else:\n        if devices is not None:\n            raise RuntimeError(f\"'devices' must not be specified when 'out' is specified, but got devices={devices}\")\n        if chunk_sizes is not None:\n            raise RuntimeError(f\"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}\")\n        return tuple(torch._C._scatter_out(tensor, out, dim, streams))",
    "docstring": "Scatters tensor across multiple GPUs. Args: tensor (Tensor): tensor to scatter. Can be on CPU or GPU. devices (Iterable[torch.device, str or int], optional): an iterable of GPU devices, among which to scatter. chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on each device. It should match :attr: in length and sums to `tensortensortensordimdevicesoutoutchunk_sizesoutdevicestensordevicesoutouttensor`.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\parallel\\comm.py",
    "ast_data": "FunctionDef name:scatter arg:tensor arg:devices arg:chunk_sizes arg:dim arg:streams arguments arg arg arg arg arg arg Assign Call If Compare Assign Call Return return:yes Call Call If Compare Raise Call If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_crc32_options",
    "source_code": "def set_crc32_options(compute_crc32: bool):\n    from torch.utils.serialization import config\n    config.save.compute_crc32 = compute_crc32",
    "docstring": "Set whether :func: computes and writes crc32 for each record. .. note:: Setting this to `` will be able to load the file. Args: compute_crc32 (bool): set crc32 compuation flag",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:set_crc32_options arg:compute_crc32 arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "max_size",
    "source_code": "def max_size() -> int:\n    return _MAX_SIZE",
    "docstring": "Returns the maximum size each proto chunk.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\constants.py",
    "ast_data": "FunctionDef name:max_size arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TopKCategoricalAccuracy",
    "source_code": "class TopKCategoricalAccuracy(MeanMetricWrapper):\n\n    def __init__(self, k=5, name='top_k_categorical_accuracy', dtype=None):\n        super(TopKCategoricalAccuracy, self).__init__(top_k_categorical_accuracy, name, dtype=dtype, k=k)",
    "docstring": "Computes how often targets are in the top predictions. Args: k: (Optional) Number of top elements to look at for computing accuracy. Defaults to 5. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. Standalone usage: >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1) >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]) >>> m.result().numpy() 0.5 >>> m.reset_state() >>> m.update_state([[0, 0, 1], [0, 1, 0]], ... [[0.1, 0.9, 0.8], [0.05, 0.95, 0]], ... sample_weight=[0.7, 0.3]) >>> m.result().numpy() 0.3 Usage with API:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "ClassDef name:TopKCategoricalAccuracy FunctionDef name:__init__ arg:self arg:k arg:name arg:dtype arguments arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "StackTraceMapper",
    "source_code": "class StackTraceMapper(tf_stack.StackTraceMapper):\n\n    def __init__(self, converted_fn):\n        super().__init__()\n        self._source_map = converted_fn.ag_source_map\n        self._cached_map = None\n\n    def get_effective_source_map(self):\n        if self._cached_map is not None:\n            return self._cached_map\n        parent_map = self.parent.get_effective_source_map()\n        effective_source_map = {}\n        for loc, origin in self._source_map.items():\n            effective_source_map[loc.filename, loc.lineno] = (origin.loc.filename, origin.loc.lineno, origin.function_name)\n        for key, value in parent_map.items():\n            filename, lineno, _ = value\n            value_loc = origin_info.LineLocation(filename=filename, lineno=lineno)\n            if value_loc in self._source_map:\n                origin = self._source_map[value_loc]\n                effective_source_map[key] = (origin.loc.filename, origin.loc.lineno, origin.function_name)\n            else:\n                effective_source_map[key] = value\n        self._cached_map = effective_source_map\n        return effective_source_map",
    "docstring": "Remaps generated code to code it originated from.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "ClassDef name:StackTraceMapper FunctionDef name:__init__ arg:self arg:converted_fn arguments arg arg Call Call Assign Assign FunctionDef name:get_effective_source_map arg:self arguments arg If Compare Return return:yes Assign Call Assign For Call Assign For Call Assign Assign Call If Compare Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "univariate",
    "source_code": "@property\ndef univariate(self):\n    return bool({'x', 'y'} - set(self.variables))",
    "docstring": "Return True if only x or y are used.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:univariate arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "setdiff1d",
    "source_code": "@deprecation.deprecated('2018-11-30', 'This op will be removed after the deprecation date. Please switch to tf.sets.difference().')\n@tf_export(v1=['setdiff1d'])\n@dispatch.add_dispatch_support\ndef setdiff1d(x, y, index_dtype=dtypes.int32, name=None):\n    return gen_array_ops.list_diff(x, y, index_dtype, name)",
    "docstring": "Computes the difference between two lists of numbers or strings. Given a list x and a list y, this operation returns a list out that represents all values that are in x but not in y. The returned list out is sorted in the same order that the numbers appear in x (duplicates are preserved). This operation also returns a list idx that represents the position of each out element in x. In other words: Example usage: >>> x = [1, 2, 3, 4, 5, 6] >>> y = [1, 3, 5] >>> setdiff1d(x,y) ListDiff(out=, idx=) Args: x: A Tensor. 1-D. Values to keep. y: A Tensor. Must have the same type as x. 1-D. Values to remove. out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int32. name: A name for the operation (optional). Returns: A tuple of Tensor objects (out, idx). out: A Tensor. Has the same type as x. idx: A Tensor of type out_idx.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:setdiff1d arg:x arg:y arg:index_dtype arg:name arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "to_arg_names",
    "source_code": "def to_arg_names(function_type):\n    arg_names = []\n    for p in function_type.parameters.values():\n        if p.kind in {function_type_lib.Parameter.POSITIONAL_ONLY, function_type_lib.Parameter.POSITIONAL_OR_KEYWORD}:\n            arg_names.append(p.name)\n    return arg_names",
    "docstring": "Generates a list of arg names from a FunctionType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\function_type_utils.py",
    "ast_data": "FunctionDef name:to_arg_names arg:function_type arguments arg Assign For Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "FIFOQueue",
    "source_code": "@tf_export('queue.FIFOQueue', v1=['queue.FIFOQueue', 'FIFOQueue'])\n@deprecation.deprecated_endpoints('FIFOQueue')\nclass FIFOQueue(QueueBase):\n\n    def __init__(self, capacity, dtypes, shapes=None, names=None, shared_name=None, name='fifo_queue'):\n        dtypes = _as_type_list(dtypes)\n        shapes = _as_shape_list(shapes, dtypes)\n        names = _as_name_list(names, dtypes)\n        with ops.init_scope(), ops.device('CPU'):\n            queue_ref = gen_data_flow_ops.fifo_queue_v2(component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n        super(FIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)",
    "docstring": "A queue implementation that dequeues elements in first-in first-out order. See for a description of the methods on this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "ClassDef name:FIFOQueue FunctionDef name:__init__ arg:self arg:capacity arg:dtypes arg:shapes arg:names arg:shared_name arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call With Call Call Assign Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tuple_types",
    "source_code": "@property\ndef tuple_types(self):\n    return self._tuple_types",
    "docstring": "Returns the types of the InfeedQueue tuple elements.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py",
    "ast_data": "FunctionDef name:tuple_types arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_username",
    "source_code": "def get_username(self):\n    return getattr(self, self.USERNAME_FIELD)",
    "docstring": "Return the username for this User.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\base_user.py",
    "ast_data": "FunctionDef name:get_username arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "item_error",
    "source_code": "def item_error(self, item: Any, exception: BaseException, response: Response | None, spider: Spider) -> LogFormatterResult:\n    return {'level': logging.ERROR, 'msg': ITEMERRORMSG, 'args': {'item': item}}",
    "docstring": "Logs a message when an item causes an error while it is passing through the item pipeline. .. versionadded:: 2.0",
    "type": "method",
    "file_path": "scrapy\\scrapy\\logformatter.py",
    "ast_data": "FunctionDef name:item_error arg:self arg:item arg:exception arg:response arg:spider arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_ncols",
    "source_code": "def set_ncols(self, ncols):\n    self._ncols = ncols",
    "docstring": "Set the number of columns.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:set_ncols arg:self arg:ncols arguments arg arg Assign"
  },
  {
    "library": "scikit-learn",
    "name": "set_score_request",
    "source_code": "def set_score_request(self, **kwargs):\n    if not _routing_enabled():\n        raise RuntimeError('This method is only available when metadata routing is enabled. You can enable it using sklearn.set_config(enable_metadata_routing=True).')\n    self._warn_overlap(message='You are setting metadata request for parameters which are already set as kwargs for this metric. These set values will be overridden by passed metadata if provided. Please pass them either as metadata or kwargs to `make_scorer`.', kwargs=kwargs)\n    self._metadata_request = MetadataRequest(owner=self.__class__.__name__)\n    for param, alias in kwargs.items():\n        self._metadata_request.score.add_request(param=param, alias=alias)\n    return self",
    "docstring": "Set requested parameters by the scorer. Please see :ref: on how the routing mechanism works. .. versionadded:: 1.3 Parameters ---------- kwargs : dict Arguments should be of the form `alias`.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:set_score_request arg:self arguments arg arg If Call Raise Call Call Assign Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "batch_normalization",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001):\n    if ndim(x) == 4:\n        if axis == 1 or axis == -3:\n            tf_data_format = 'NCHW'\n        elif axis == 3 or axis == -1:\n            tf_data_format = 'NHWC'\n        else:\n            tf_data_format = None\n        if tf_data_format == 'NHWC' or (tf_data_format == 'NCHW' and _has_nchw_support()):\n            if ndim(mean) > 1:\n                mean = array_ops.reshape(mean, [-1])\n            if ndim(var) > 1:\n                var = array_ops.reshape(var, [-1])\n            if beta is None:\n                beta = zeros_like(mean)\n            elif ndim(beta) > 1:\n                beta = array_ops.reshape(beta, [-1])\n            if gamma is None:\n                gamma = ones_like(mean)\n            elif ndim(gamma) > 1:\n                gamma = array_ops.reshape(gamma, [-1])\n        y, _, _ = nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, mean=mean, variance=var, data_format=tf_data_format, is_training=False)\n        return y\n    return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)",
    "docstring": "Applies batch normalization on x given mean, var, beta and gamma. I.e. returns: Args: x: Input tensor or variable. mean: Mean of batch. var: Variance of batch. beta: Tensor with which to center the input. gamma: Tensor by which to scale the input. axis: Integer, the axis that should be normalized. (typically the features axis). epsilon: Fuzz factor. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:batch_normalization arg:x arg:mean arg:var arg:beta arg:gamma arg:axis arg:epsilon arguments arg arg arg arg arg arg arg If Compare Call If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign If BoolOp Compare BoolOp Compare Call If Compare Call Assign Call If Compare Call Assign Call If Compare Assign Call If Compare Call Assign Call If Compare Assign Call If Compare Call Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "listdir",
    "source_code": "def listdir(self):\n    if self._isurl(self._baseurl):\n        raise NotImplementedError('Directory listing of URLs, not supported yet.')\n    else:\n        return os.listdir(self._baseurl)",
    "docstring": "List files in the source Repository. Returns ------- files : list of str or pathlib.Path List of file names (not containing a directory part). Notes ----- Does not currently work for remote repositories.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:listdir arg:self arguments arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "translate",
    "source_code": "def translate(tensor: Tensor, translation: Tensor, mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> Tensor:\n    if not isinstance(tensor, Tensor):\n        raise TypeError(f'Input tensor type is not a Tensor. Got {type(tensor)}')\n    if not isinstance(translation, Tensor):\n        raise TypeError(f'Input translation type is not a Tensor. Got {type(translation)}')\n    if len(tensor.shape) not in (3, 4):\n        raise ValueError(f'Invalid tensor shape, we expect CxHxW or BxCxHxW. Got: {tensor.shape}')\n    translation_matrix: Tensor = _compute_translation_matrix(translation)\n    return affine(tensor, translation_matrix[..., :2, :3], mode, padding_mode, align_corners)",
    "docstring": "Translate the tensor in pixel units. .. image:: _static/img/translate.png Args: tensor: The image tensor to be warped in shapes of :math:. translation: tensor containing the amount of pixels to translate in the x and y direction. The tensor must have a shape of (B, 2), where B is batch size, last dimension contains dx dy. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The translated tensor with shape as input. Example: >>> img = torch.rand(1, 3, 4, 4) >>> translation = torch.tensor([[1., 0.]]) >>> out = translate(img, translation) >>> print(out.shape) torch.Size([1, 3, 4, 4])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py",
    "ast_data": "FunctionDef name:translate arg:tensor arg:translation arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg If Call Raise Call Call If Call Raise Call Call If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "refs_expression",
    "source_code": "def refs_expression(lookup_parts, annotations):\n    for n in range(1, len(lookup_parts) + 1):\n        level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])\n        if annotations.get(level_n_lookup):\n            return (level_n_lookup, lookup_parts[n:])\n    return (None, ())",
    "docstring": "Check if the lookup_parts contains references to the given annotations set. Because the LOOKUP_SEP is contained in the default annotation names, check each prefix of the lookup_parts for a match.",
    "type": "function",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:refs_expression arg:lookup_parts arg:annotations arguments arg arg For Call Call Assign Call If Call Return return:yes Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_init_inter_node_process_group",
    "source_code": "@no_type_check\ndef _init_inter_node_process_group(global_process_group: dist.ProcessGroup, num_devices_per_node: int) -> dist.ProcessGroup:\n    inter_node_pg = None\n    sharding_backend = dist.get_backend(global_process_group)\n    world_size = dist.get_world_size(global_process_group)\n    num_nodes = world_size // num_devices_per_node\n    my_local_rank = dist.get_rank(global_process_group) % num_devices_per_node\n    for local_rank in range(num_devices_per_node):\n        ranks_for_inter_group = [local_rank + i * num_devices_per_node for i in range(num_nodes)]\n        grp = dist.new_group(ranks=ranks_for_inter_group, backend=sharding_backend)\n        if local_rank == my_local_rank:\n            inter_node_pg = grp\n    assert inter_node_pg is not None, f'{my_local_rank} expected to assign inter-node pg, but did not'\n    return inter_node_pg",
    "docstring": "Return an inter-node process group where each contained rank has the same local rank. For example, given each row is a distinct node: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 This API would return inter-node process group [0, 8], [1, 9], [2, 10], and so forth depending on the process's rank. For example, rank 1 would get [1, 9], rank 5 would get [5, 13].",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_init_utils.py",
    "ast_data": "FunctionDef name:_init_inter_node_process_group arg:global_process_group arg:num_devices_per_node arguments arg arg Assign Assign Call Assign Call Assign Assign Call For Call Assign Call Assign Call If Compare Assign Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "groups",
    "source_code": "def groups(self) -> list:\n    _tables()\n    self._check_if_open()\n    assert self._handle is not None\n    assert _table_mod is not None\n    return [g for g in self._handle.walk_groups() if not isinstance(g, _table_mod.link.Link) and (getattr(g._v_attrs, 'pandas_type', None) or getattr(g, 'table', None) or (isinstance(g, _table_mod.table.Table) and g._v_name != 'table'))]",
    "docstring": "Return a list of all the top-level nodes. Each node returned is not a pandas storage object. Returns ------- list List of objects. See Also -------- HDFStore.get_node : Returns the node with the key. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"]) >>> store = pd.HDFStore(\"store.h5\", \"w\") # doctest: +SKIP >>> store.put(\"data\", df) # doctest: +SKIP >>> print(store.groups()) # doctest: +SKIP >>> store.close() # doctest: +SKIP [/data (Group) '' children := ['axis0' (Array), 'axis1' (Array), 'block0_values' (Array), 'block0_items' (Array)]]",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:groups arg:self arguments arg Call Call Compare Compare Return return:yes Call BoolOp Call BoolOp Call Call BoolOp Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "experimental_should_init",
    "source_code": "@property\ndef experimental_should_init(self):\n    return self._strategy.extended.experimental_should_init",
    "docstring": "Whether to run init ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:experimental_should_init arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@abc.abstractproperty\ndef name(self):\n    pass",
    "docstring": "Returns string. Used for naming and for name_scope.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_validate_pruning_dim",
    "source_code": "def _validate_pruning_dim(t, dim):\n    if dim >= t.dim():\n        raise IndexError(f'Invalid index {dim} for tensor of size {t.shape}')",
    "docstring": "Validate that the pruning dimension is within the bounds of the tensor dimension. Args: t (torch.Tensor): tensor representing the parameter to prune dim (int): index of the dim along which we define channels to prune",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_validate_pruning_dim arg:t arg:dim arguments arg arg If Compare Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "parse_attribute",
    "source_code": "@classmethod\ndef parse_attribute(cls, name, attr_string):\n    if attr_string[0] == '{':\n        values = cls._get_nom_val(attr_string)\n        return cls(name, values)\n    else:\n        return None",
    "docstring": "Parse the attribute line if it knows how. Returns the parsed attribute, or None. For nominal attributes, the attribute string would be like '{, , }'.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_attribute arg:cls arg:name arg:attr_string arguments arg arg arg If Compare Assign Call Return return:yes Call Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "num2date",
    "source_code": "def num2date(x, tz=None):\n    tz = _get_tzinfo(tz)\n    return _from_ordinalf_np_vectorized(x, tz).tolist()",
    "docstring": "Convert Matplotlib dates to objects. Parameters ---------- x : float or sequence of floats Number of days (fraction part represents hours, minutes, seconds) since the epoch. See for the epoch, which can be changed by :rc: or . tz : str or , default: :rc: Timezone of *x*. If a string, *tz* is passed to . Returns ------- or sequence of Dates are returned in timezone *tz*. If *x* is a sequence, a sequence of objects will be returned. Notes ----- The Gregorian calendar is assumed; this is not universal practice. For details, see the module docstring.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:num2date arg:x arg:tz arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    n_repeats = self.n_repeats\n    rng = check_random_state(self.random_state)\n    for idx in range(n_repeats):\n        cv = self.cv(random_state=rng, shuffle=True, **self.cvargs)\n        for train_index, test_index in cv.split(X, y, groups):\n            yield (train_index, test_index)",
    "docstring": "Generates indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : array-like of shape (n_samples,), default=None Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Assign Call For Call Assign Call For Call"
  },
  {
    "library": "scipy",
    "name": "gompertz_gen",
    "source_code": "class gompertz_gen(rv_continuous):\n\n    def _shape_info(self):\n        return [_ShapeInfo('c', False, (0, np.inf), (False, False))]\n\n    def _pdf(self, x, c):\n        return np.exp(self._logpdf(x, c))\n\n    def _logpdf(self, x, c):\n        return np.log(c) + x - c * sc.expm1(x)\n\n    def _cdf(self, x, c):\n        return -sc.expm1(-c * sc.expm1(x))\n\n    def _ppf(self, q, c):\n        return sc.log1p(-1.0 / c * sc.log1p(-q))\n\n    def _sf(self, x, c):\n        return np.exp(-c * sc.expm1(x))\n\n    def _isf(self, p, c):\n        return sc.log1p(-np.log(p) / c)\n\n    def _entropy(self, c):\n        return 1.0 - np.log(c) - sc._ufuncs._scaled_exp1(c) / c",
    "docstring": "A Gompertz (or truncated Gumbel) continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x, c) = c \\exp(x) \\exp(-c (e^x-1)) for :math:, :math:. takes `c`. %(after_notes)s %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:gompertz_gen FunctionDef name:_shape_info arg:self arguments arg Return return:yes Call FunctionDef name:_pdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_logpdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_cdf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_ppf arg:self arg:q arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_sf arg:self arg:x arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_isf arg:self arg:p arg:c arguments arg arg arg Return return:yes Call Call FunctionDef name:_entropy arg:self arg:c arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "MaxNorm",
    "source_code": "class MaxNorm(Constraint):\n\n    def __init__(self, max_value=2, axis=0):\n        self.max_value = max_value\n        self.axis = axis\n\n    @doc_controls.do_not_generate_docs\n    def __call__(self, w):\n        norms = backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))\n        desired = backend.clip(norms, 0, self.max_value)\n        return w * (desired / (backend.epsilon() + norms))\n\n    @doc_controls.do_not_generate_docs\n    def get_config(self):\n        return {'max_value': self.max_value, 'axis': self.axis}",
    "docstring": "MaxNorm weight constraint. Constrains the weights incident to each hidden unit to have a norm less than or equal to a desired value. Also available via the shortcut function . Args: max_value: the maximum norm value for the incoming weights. axis: integer, axis along which to calculate weight norms. For instance, in a layer the weight matrix has shape , set to to constrain each weight vector of length . In a layer with , the weight tensor has shape , set to to constrain the weights of each filter tensor of size .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "ClassDef name:MaxNorm FunctionDef name:__init__ arg:self arg:max_value arg:axis arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:w arguments arg arg Assign Call Call Call Assign Call Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "is_alias",
    "source_code": "@staticmethod\n@cache\ndef is_alias(method):\n    ds = inspect.getdoc(method)\n    if ds is None:\n        return False\n    return ds.startswith('Alias for ')",
    "docstring": "Return whether the object *method* is an alias for another method.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:is_alias arg:method arguments arg Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_categorical_column_weighted",
    "source_code": "def is_categorical_column_weighted(self):\n    if isinstance(self.categorical_column, (fc._WeightedCategoricalColumn, fc_lib.WeightedCategoricalColumn)):\n        return True\n    return False",
    "docstring": "Check if the categorical column of the embedding column is weighted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:is_categorical_column_weighted arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_switch_canvas_and_return_print_method",
    "source_code": "@contextmanager\ndef _switch_canvas_and_return_print_method(self, fmt, backend=None):\n    canvas = None\n    if backend is not None:\n        from .backends.registry import backend_registry\n        canvas_class = backend_registry.load_backend_module(backend).FigureCanvas\n        if not hasattr(canvas_class, f'print_{fmt}'):\n            raise ValueError(f'The {backend!r} backend does not support {fmt} output')\n        canvas = canvas_class(self.figure)\n    elif hasattr(self, f'print_{fmt}'):\n        canvas = self\n    else:\n        canvas_class = get_registered_canvas_class(fmt)\n        if canvas_class is None:\n            raise ValueError('Format {!r} is not supported (supported formats: {})'.format(fmt, ', '.join(sorted(self.get_supported_filetypes()))))\n        canvas = canvas_class(self.figure)\n    canvas._is_saving = self._is_saving\n    meth = getattr(canvas, f'print_{fmt}')\n    mod = meth.func.__module__ if hasattr(meth, 'func') else meth.__module__\n    if mod.startswith(('matplotlib.', 'mpl_toolkits.')):\n        optional_kws = {'dpi', 'facecolor', 'edgecolor', 'orientation', 'bbox_inches_restore'}\n        skip = optional_kws - {*inspect.signature(meth).parameters}\n        print_method = functools.wraps(meth)(lambda *args, **kwargs: meth(*args, **{k: v for k, v in kwargs.items() if k not in skip}))\n    else:\n        print_method = meth\n    try:\n        yield print_method\n    finally:\n        self.figure.canvas = self",
    "docstring": "Context manager temporarily setting the canvas for saving the figure:: with (canvas._switch_canvas_and_return_print_method(fmt, backend) as print_method): # `get_registered_canvas_class` class of the given backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_switch_canvas_and_return_print_method arg:self arg:fmt arg:backend arguments arg arg arg Assign If Compare Assign Call If Call Raise Call Assign Call If Call Assign Assign Call If Compare Raise Call Call Call Call Call Assign Call Assign Assign Call Assign Call If Call Assign Assign Call Assign Call Call arguments arg arg Call Call Compare Assign Try Assign"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, data: dict) -> dict:\n    with torch.autocast(enabled=self.conf.mp, device_type='cuda'):\n        return self._forward(data)",
    "docstring": "Match keypoints and descriptors between two images. Input (dict): image0: dict keypoints: [B x M x 2] descriptors: [B x M x D] image: [B x C x H x W] or image_size: [B x 2] image1: dict keypoints: [B x N x 2] descriptors: [B x N x D] image: [B x C x H x W] or image_size: [B x 2] Output (dict): log_assignment: [B x M+1 x N+1] matches0: [B x M] matching_scores0: [B x M] matches1: [B x N] matching_scores1: [B x N] matches: List[[Si x 2]], scores: List[[Si]]",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:forward arg:self arg:data arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "register_symbolic_tensor_type",
    "source_code": "def register_symbolic_tensor_type(cls):\n    global _user_convertible_tensor_types\n    if cls not in _user_convertible_tensor_types:\n        keras_tensor.register_keras_tensor_specialization(cls, keras_tensor.UserRegisteredTypeKerasTensor)\n    _user_convertible_tensor_types.add(cls)",
    "docstring": "Allows users to specify types regarded as symbolic s. Used in conjunction with , calling allows non- objects to be plumbed through Keras layers. Example: Args: cls: A type which shall be regarded as a symbolic .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:register_symbolic_tensor_type arg:cls arguments arg If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "run_forward",
    "source_code": "def run_forward(self, num_runs, print_per_iter, cuda_sync):\n    if print_per_iter:\n        for _ in range(num_runs):\n            start_time = time.time()\n            self.output = self.op_bench.forward_impl()\n            if cuda_sync:\n                torch.cuda.synchronize(torch.cuda.current_device())\n            end_time = time.time()\n            self.time_series.append((end_time - start_time) * 1000.0)\n    else:\n        for _ in range(num_runs):\n            self.output = self.op_bench.forward_impl()\n        if cuda_sync:\n            torch.cuda.synchronize(torch.cuda.current_device())",
    "docstring": "Run the forward path of an op with eager mode",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:run_forward arg:self arg:num_runs arg:print_per_iter arg:cuda_sync arguments arg arg arg arg If For Call Assign Call Assign Call If Call Call Assign Call Call For Call Assign Call If Call Call"
  },
  {
    "library": "kornia",
    "name": "transform",
    "source_code": "@classmethod\ndef transform(cls, input: Keypoints, module: Module, param: ParamItem, extra_args: Optional[Dict[str, Any]]=None) -> Keypoints:\n    if extra_args is None:\n        extra_args = {}\n    _input = input.clone()\n    if isinstance(module, (K.GeometricAugmentationBase2D,)):\n        _input = module.transform_keypoints(_input, cls.get_instance_module_param(param), module.flags, transform=module.transform_matrix, **extra_args)\n    elif isinstance(module, (K.GeometricAugmentationBase3D,)):\n        raise NotImplementedError('The support for 3d keypoint operations are not yet supported. You are welcome to file a PR in our repo.')\n    elif isinstance(module, K.ImageSequential) and (not module.is_intensity_only()):\n        _input = module.transform_keypoints(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n    elif isinstance(module, K.container.ImageSequentialBase):\n        _input = module.transform_keypoints(_input, params=cls.get_sequential_module_param(param), extra_args=extra_args)\n    elif isinstance(module, (K.auto.operations.OperationBase,)):\n        return KeypointSequentialOps.transform(input, module=module.op, param=param, extra_args=extra_args)\n    return _input",
    "docstring": "Apply a transformation with respect to the parameters. Args: input: the input tensor, (B, N, 4, 2) or (B, 4, 2). module: any torch Module but only kornia augmentation modules will count to apply transformations. param: the corresponding parameters to the module. extra_args: Optional dictionary of extra arguments with specific options for different input types.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\ops.py",
    "ast_data": "FunctionDef name:transform arg:cls arg:input arg:module arg:param arg:extra_args arguments arg arg arg arg arg If Compare Assign Assign Call If Call Assign Call Call If Call Raise Call If BoolOp Call Call Assign Call Call If Call Assign Call Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_TensorArrayScatterGrad",
    "source_code": "@ops.RegisterGradient('TensorArrayScatter')\n@ops.RegisterGradient('TensorArrayScatterV2')\n@ops.RegisterGradient('TensorArrayScatterV3')\ndef _TensorArrayScatterGrad(op: ops.Operation, flow):\n    handle = op.inputs[0]\n    indices = op.inputs[1]\n    dtype = op.get_attr('T')\n    grad_source = _GetGradSource(flow)\n    flow_out = array_ops.identity(op.outputs[0], 'flow_out')\n    with ops.control_dependencies([flow_out]):\n        flow = array_ops.identity(flow, 'write_barrier')\n    g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow, colocate_with_first_write_call=False).grad(source=grad_source, flow=flow)\n    grad = g.gather(indices)\n    return [None, None, grad, flow]",
    "docstring": "Gradient for TensorArrayScatter. Args: op: Forward TensorArrayScatter op. flow: Gradient flow to TensorArrayScatter. Returns: A grad , the gradient created in upstream ReadGrads or PackGrad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_TensorArrayScatterGrad arg:op arg:flow arguments arg arg Assign Assign Assign Call Assign Call Assign Call With Call Assign Call Assign Call Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call",
    "source_code": "def call(self, inputs, states):\n    raise NotImplementedError('Abstract method')",
    "docstring": "The function that contains the logic for one RNN step calculation. Args: inputs: the input tensor, which is a slide from the overall RNN input by the time dimension (usually the second dimension). states: the state tensor from previous step, which has the same shape as . In the case of timestep 0, it will be the initial state user specified, or zero filled tensor otherwise. Returns: A tuple of two tensors: 1. output tensor for the current timestep, with size . 2. state tensor for next step, which has the shape of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py",
    "ast_data": "FunctionDef name:call arg:self arg:inputs arg:states arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_inputs_if_ragged",
    "source_code": "def convert_inputs_if_ragged(inputs):\n\n    def _convert_ragged_input(inputs):\n        if isinstance(inputs, ragged_tensor.RaggedTensor):\n            return inputs.to_tensor()\n        return inputs\n    flat_inputs = nest.flatten(inputs)\n    contains_ragged = py_any((isinstance(i, ragged_tensor.RaggedTensor) for i in flat_inputs))\n    if not contains_ragged:\n        return (inputs, None)\n    inputs = nest.map_structure(_convert_ragged_input, inputs)\n    nested_row_lengths = math_ops.cast(flat_inputs[0].nested_row_lengths()[0], 'int32')\n    return (inputs, nested_row_lengths)",
    "docstring": "Converts any ragged tensors to dense.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:convert_inputs_if_ragged arg:inputs arguments arg FunctionDef name:_convert_ragged_input arg:inputs arguments arg If Call Return return:yes Call Return return:yes Assign Call Assign Call Call If Return return:yes Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_resolve_positionals",
    "source_code": "def _resolve_positionals(self, args: tuple[DataSource | VariableSpec, ...], data: DataSource, variables: dict[str, VariableSpec]) -> tuple[DataSource, dict[str, VariableSpec]]:\n    if len(args) > 3:\n        err = 'Plot() accepts no more than 3 positional arguments (data, x, y).'\n        raise TypeError(err)\n    if isinstance(args[0], (abc.Mapping, pd.DataFrame)) or hasattr(args[0], '__dataframe__'):\n        if data is not None:\n            raise TypeError('`data` given by both name and position.')\n        data, args = (args[0], args[1:])\n    if len(args) == 2:\n        x, y = args\n    elif len(args) == 1:\n        x, y = (*args, None)\n    else:\n        x = y = None\n    for name, var in zip('yx', (y, x)):\n        if var is not None:\n            if name in variables:\n                raise TypeError(f'`{name}` given by both name and position.')\n            variables = {name: cast(VariableSpec, var), **variables}\n    return (data, variables)",
    "docstring": "Handle positional arguments, which may contain data / x / y.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:_resolve_positionals arg:self arg:args arg:data arg:variables arguments arg arg arg arg If Compare Call Assign Raise Call If BoolOp Call Call If Compare Raise Call Assign If Compare Call Assign If Compare Call Assign Assign For Call If Compare If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "serialize_gathered_objects",
    "source_code": "def serialize_gathered_objects(graph_view, object_map=None, call_with_mapped_captures=None, saveables_cache=None):\n    trackable_objects, node_paths = graph_view.breadth_first_traversal()\n    object_names = object_identity.ObjectIdentityDictionary()\n    for obj, path in node_paths.items():\n        object_names[obj] = trackable_utils.object_path_to_string(path)\n    node_ids = object_identity.ObjectIdentityDictionary()\n    for node_id, node in enumerate(trackable_objects):\n        node_ids[node] = node_id\n    slot_variables = util.serialize_slot_variables(trackable_objects=trackable_objects, node_ids=node_ids, object_names=object_names)\n    object_graph_proto = _fill_object_graph_proto(graph_view=graph_view, trackable_objects=trackable_objects, node_ids=node_ids, slot_variables=slot_variables)\n    named_saveable_objects, feed_additions, registered_savers = _add_attributes_to_object_graph(trackable_objects=trackable_objects, object_graph_proto=object_graph_proto, node_ids=node_ids, object_names=object_names, object_map=object_map, call_with_mapped_captures=call_with_mapped_captures, saveables_cache=saveables_cache)\n    util.add_checkpoint_values_check(object_graph_proto)\n    return (named_saveable_objects, object_graph_proto, feed_additions, registered_savers)",
    "docstring": "Create SaveableObjects and protos for gathered objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\save_util_v1.py",
    "ast_data": "FunctionDef name:serialize_gathered_objects arg:graph_view arg:object_map arg:call_with_mapped_captures arg:saveables_cache arguments arg arg arg arg Assign Call Assign Call For Call Assign Call Assign Call For Call Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "response_add",
    "source_code": "def response_add(self, request, obj, post_url_continue=None):\n    if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:\n        request.POST = request.POST.copy()\n        request.POST['_continue'] = 1\n    return super().response_add(request, obj, post_url_continue)",
    "docstring": "Determine the HttpResponse for the add_view stage. It mostly defers to its superclass implementation but is customized because the User model has a slightly different workflow.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\admin.py",
    "ast_data": "FunctionDef name:response_add arg:self arg:request arg:obj arg:post_url_continue arguments arg arg arg arg If BoolOp Compare Compare Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "call_with_captures",
    "source_code": "def call_with_captures(self, args, kwargs, captures):\n    pass",
    "docstring": "Calls this AtomicFunction with captures as defined by its FunctionType. Args: args: Tuple containing positional arguments kwargs: Dict containing keyword arguments captures: Tuple of tensors supplying captured tensor values. Returns: A structured output value based on the inputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "FunctionDef name:call_with_captures arg:self arg:args arg:kwargs arg:captures arguments arg arg arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_check_means",
    "source_code": "def _check_means(means, n_components, n_features):\n    means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False)\n    _check_shape(means, (n_components, n_features), 'means')\n    return means",
    "docstring": "Validate the provided 'means'. Parameters ---------- means : array-like of shape (n_components, n_features) The centers of the current components. n_components : int Number of components. n_features : int Number of features. Returns ------- means : array, (n_components, n_features)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_means arg:means arg:n_components arg:n_features arguments arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eigvals",
    "source_code": "def eigvals(self, name='eigvals'):\n    if not self.is_self_adjoint:\n        raise NotImplementedError('Only self-adjoint matrices are supported.')\n    with self._name_scope(name):\n        return self._eigvals()",
    "docstring": "Returns the eigenvalues of this linear operator. If the operator is marked as self-adjoint (via ) this computation can be more efficient. Note: This currently only supports self-adjoint operators. Args: name: A name for this . Returns: Shape of same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:eigvals arg:self arg:name arguments arg arg If Raise Call With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "TestOneInput",
    "source_code": "def TestOneInput(data):\n    fh = FuzzingHelper(data)\n    input_tensor = fh.get_random_numeric_tensor()\n    _ = tf.raw_ops.Acos(x=input_tensor)",
    "docstring": "Test randomized fuzzing input for tf.raw_ops.Acos.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\acos_fuzz.py",
    "ast_data": "FunctionDef name:TestOneInput arg:data arguments arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "django",
    "name": "CurrentSiteMiddleware",
    "source_code": "class CurrentSiteMiddleware(MiddlewareMixin):\n\n    def process_request(self, request):\n        request.site = get_current_site(request)",
    "docstring": "Middleware that sets attribute to request object.",
    "type": "class",
    "file_path": "django\\django\\contrib\\sites\\middleware.py",
    "ast_data": "ClassDef name:CurrentSiteMiddleware FunctionDef name:process_request arg:self arg:request arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "resize_fn",
    "source_code": "def resize_fn(images_t, new_size):\n    if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR:\n        return gen_image_ops.resize_bilinear(images_t, new_size, align_corners=align_corners)\n    elif method == ResizeMethodV1.NEAREST_NEIGHBOR or method == ResizeMethod.NEAREST_NEIGHBOR:\n        return gen_image_ops.resize_nearest_neighbor(images_t, new_size, align_corners=align_corners)\n    elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC:\n        return gen_image_ops.resize_bicubic(images_t, new_size, align_corners=align_corners)\n    elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA:\n        return gen_image_ops.resize_area(images_t, new_size, align_corners=align_corners)\n    else:\n        raise ValueError('Resize method is not implemented: {}'.format(method))",
    "docstring": "Legacy resize core function, passed to _resize_images_common.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:resize_fn arg:images_t arg:new_size arguments arg arg If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call If BoolOp Compare Compare Return return:yes Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_params",
    "source_code": "def _check_params(X, metric, p, metric_params):\n    params = zip(['metric', 'p', 'metric_params'], [metric, p, metric_params])\n    est_params = X.get_params()\n    for param_name, func_param in params:\n        if func_param != est_params[param_name]:\n            raise ValueError('Got %s for %s, while the estimator has %s for the same parameter.' % (func_param, param_name, est_params[param_name]))",
    "docstring": "Check the validity of the input parameters",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_graph.py",
    "ast_data": "FunctionDef name:_check_params arg:X arg:metric arg:p arg:metric_params arguments arg arg arg arg Assign Call Assign Call For If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_assign_stablehlo_quantization_config_or_populate_default",
    "source_code": "def _assign_stablehlo_quantization_config_or_populate_default(self, args):\n    if self.experimental_stablehlo_quantizer_config is not None and Optimize.DEFAULT not in self.optimizations:\n        args['quantization_config'] = self.experimental_stablehlo_quantizer_config\n    elif Optimize.DEFAULT in self.optimizations and self.representative_dataset:\n        if len(self._saved_model_exported_names) != 1:\n            raise ValueError('StableHLO quantizer is only supported when converting from a SavedModel with one signature key.')\n        signature_key = self._saved_model_exported_names[0]\n        tfrecord_file_path = tempfile.mkstemp(suffix='.tfrecord', prefix=signature_key)[1]\n        rd.TfRecordRepresentativeDatasetSaver({signature_key: tfrecord_file_path}).save({signature_key: self.representative_dataset()})\n        quantization_config = qc.QuantizationConfig(static_range_ptq_preset=qc.StaticRangePtqPreset(representative_datasets=[qc.RepresentativeDatasetConfig(tf_record=qc.TfRecordFile(path=tfrecord_file_path))], enable_per_channel_quantized_weight=True, enable_full_int_quantization=True), pipeline_config=qc.PipelineConfig(unpack_quantized_types=False))\n        args['quantization_config'] = quantization_config\n    else:\n        raise ValueError('StableHLO quantizer only supports static-range and weight-only PTQ.')",
    "docstring": "Assigns to or populate default. Args: args: Dictionary of argument names and associated values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_assign_stablehlo_quantization_config_or_populate_default arg:self arg:args arguments arg arg If BoolOp Compare Compare Assign If BoolOp Compare If Compare Call Raise Call Assign Assign Call Call Call Call Assign Call Call Call Call Call Assign Raise Call"
  },
  {
    "library": "django",
    "name": "MemoryFileUploadHandler",
    "source_code": "class MemoryFileUploadHandler(FileUploadHandler):\n\n    def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):\n        self.activated = content_length <= settings.FILE_UPLOAD_MAX_MEMORY_SIZE\n\n    def new_file(self, *args, **kwargs):\n        super().new_file(*args, **kwargs)\n        if self.activated:\n            self.file = BytesIO()\n            raise StopFutureHandlers()\n\n    def receive_data_chunk(self, raw_data, start):\n        if self.activated:\n            self.file.write(raw_data)\n        else:\n            return raw_data\n\n    def file_complete(self, file_size):\n        if not self.activated:\n            return\n        self.file.seek(0)\n        return InMemoryUploadedFile(file=self.file, field_name=self.field_name, name=self.file_name, content_type=self.content_type, size=file_size, charset=self.charset, content_type_extra=self.content_type_extra)",
    "docstring": "File upload handler to stream uploads into memory (used for small files).",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "ClassDef name:MemoryFileUploadHandler FunctionDef name:handle_raw_input arg:self arg:input_data arg:META arg:content_length arg:boundary arg:encoding arguments arg arg arg arg arg arg Assign Compare FunctionDef name:new_file arg:self arguments arg arg arg Call Call If Assign Call Raise Call FunctionDef name:receive_data_chunk arg:self arg:raw_data arg:start arguments arg arg arg If Call Return return:yes FunctionDef name:file_complete arg:self arg:file_size arguments arg arg If Return return:no Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_clim",
    "source_code": "def get_clim(self):\n    return (self.norm.vmin, self.norm.vmax)",
    "docstring": "Return the values (min, max) that are mapped to the colormap limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:get_clim arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_op_policy_uri",
    "source_code": "def validate_op_policy_uri(self):\n    value = self.get('op_policy_uri')\n    if value and (not is_valid_url(value)):\n        raise ValueError('\"op_policy_uri\" MUST be a URL')",
    "docstring": "OPTIONAL. URL that the authorization server provides to the person registering the client to read about the authorization server's requirements on how the client can use the data provided by the authorization server. The registration process SHOULD display this URL to the person registering the client if it is given. As described in Section 5, despite the identifier \"op_policy_uri\" appearing to be OpenID-specific, its usage in this specification is actually referring to a general OAuth 2.0 feature that is not specific to OpenID Connect.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_op_policy_uri arg:self arguments arg Assign Call If BoolOp Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "ancestors",
    "source_code": "def ancestors(*types):\n    check(types)\n    lists = []\n    for t, vas in zip(types, vancestors(*types)):\n        n_vas = len(vas)\n        if n_vas > 1:\n            raise RuntimeError(f'Ambiguous dispatch for {t}: {vas}')\n        elif n_vas == 1:\n            va, = vas\n            mro = type('t', (t, va), {}).__mro__[1:]\n        else:\n            mro = t.__mro__\n        lists.append(mro[:-1])\n    return lists",
    "docstring": "Get a list of virtual MROs, one for each type",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:ancestors arguments arg Call Assign For Call Call Assign Call If Compare Raise Call If Compare Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "inv_transform",
    "source_code": "def inv_transform(xs, ys, zs, invM):\n    vec = _vec_pad_ones(xs, ys, zs)\n    vecr = np.dot(invM, vec)\n    if vecr.shape == (4,):\n        vecr = vecr.reshape((4, 1))\n    for i in range(vecr.shape[1]):\n        if vecr[3][i] != 0:\n            vecr[:, i] = vecr[:, i] / vecr[3][i]\n    return (vecr[0], vecr[1], vecr[2])",
    "docstring": "Transform the points by the inverse of the projection matrix, *invM*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\proj3d.py",
    "ast_data": "FunctionDef name:inv_transform arg:xs arg:ys arg:zs arg:invM arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call For Call If Compare Assign Return return:yes"
  },
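A self-contained numpy sketch of the same homogeneous-coordinate round trip; _vec_pad_ones is internal to mplot3d, so the ones-padding is re-created inline here, and the projection matrix M is an arbitrary invertible stand-in:

import numpy as np

M = np.diag([2.0, 3.0, 4.0, 1.0])                 # stand-in projection matrix
invM = np.linalg.inv(M)

xs, ys, zs = np.array([2.0]), np.array([6.0]), np.array([8.0])
vec = np.vstack([xs, ys, zs, np.ones_like(xs)])   # what _vec_pad_ones builds

vecr = invM @ vec
vecr = vecr / vecr[3]                             # perspective divide by w
print(vecr[0], vecr[1], vecr[2])                  # [1.] [2.] [2.]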
  {
    "library": "scikit-learn",
    "name": "load_linnerud",
    "source_code": "@validate_params({'return_X_y': ['boolean'], 'as_frame': ['boolean']}, prefer_skip_nested_validation=True)\ndef load_linnerud(*, return_X_y=False, as_frame=False):\n    data_filename = 'linnerud_exercise.csv'\n    target_filename = 'linnerud_physiological.csv'\n    data_module_path = resources.files(DATA_MODULE)\n    data_path = data_module_path / data_filename\n    with data_path.open('r', encoding='utf-8') as f:\n        header_exercise = f.readline().split()\n        f.seek(0)\n        data_exercise = np.loadtxt(f, skiprows=1)\n    target_path = data_module_path / target_filename\n    with target_path.open('r', encoding='utf-8') as f:\n        header_physiological = f.readline().split()\n        f.seek(0)\n        data_physiological = np.loadtxt(f, skiprows=1)\n    fdescr = load_descr('linnerud.rst')\n    frame = None\n    if as_frame:\n        frame, data_exercise, data_physiological = _convert_data_dataframe('load_linnerud', data_exercise, data_physiological, header_exercise, header_physiological)\n    if return_X_y:\n        return (data_exercise, data_physiological)\n    return Bunch(data=data_exercise, feature_names=header_exercise, target=data_physiological, target_names=header_physiological, frame=frame, DESCR=fdescr, data_filename=data_filename, target_filename=target_filename, data_module=DATA_MODULE)",
    "docstring": "Load and return the physical exercise Linnerud dataset. This dataset is suitable for multi-output regression tasks. ============== ============================ Samples total 20 Dimensionality 3 (for both data and target) Features integer Targets integer ============== ============================ Read more in the :ref:. Parameters ---------- return_X_y : bool, default=False If True, returns `datatargetreturn_X_ydatatarget~sklearn.utils.Bunchas_frame=Truedataas_frame=Truetargetas_frame=Truedatatarget(20, 3)Xy` of a given sample. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_linnerud >>> linnerud = load_linnerud() >>> linnerud.data.shape (20, 3) >>> linnerud.target.shape (20, 3)",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:load_linnerud arguments arg arg Assign Assign Assign Call Assign With Call Assign Call Call Call Assign Call Assign With Call Assign Call Call Call Assign Call Assign Call Assign If Assign Call If Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "ConstraintWrapper",
    "source_code": "class ConstraintWrapper:\n\n    def __init__(self, g_cons, g_cons_args):\n        self.g_cons = g_cons\n        self.g_cons_args = g_cons_args\n\n    def gcons(self, v_x_a):\n        vfeasible = True\n        for g, args in zip(self.g_cons, self.g_cons_args):\n            if np.any(g(v_x_a, *args) < 0.0):\n                vfeasible = False\n                break\n        return vfeasible",
    "docstring": "Object to wrap constraints to pass to .",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "ClassDef name:ConstraintWrapper FunctionDef name:__init__ arg:self arg:g_cons arg:g_cons_args arguments arg arg arg Assign Assign FunctionDef name:gcons arg:self arg:v_x_a arguments arg arg Assign For Call If Call Compare Call Assign Return return:yes"
  },
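A short usage sketch, under the assumption (implied by the code above) that each g_cons callable follows scipy's g(x) >= 0 feasibility convention:

import numpy as np

# Two constraints: x0 + x1 <= 1 (written as 1 - x0 - x1 >= 0) and x0 >= 0.
g_cons = [lambda x: 1.0 - x[0] - x[1], lambda x: x[0]]
g_cons_args = [(), ()]

wrapper = ConstraintWrapper(g_cons, g_cons_args)
print(wrapper.gcons(np.array([0.2, 0.3])))  # True: both constraints hold
print(wrapper.gcons(np.array([0.9, 0.9])))  # False: first constraint < 0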
  {
    "library": "scikit-learn",
    "name": "_score",
    "source_code": "def _score(self, *, predictions, y, n_y, scorer, score_params):\n    if self.is_clf:\n        identity_estimator = _IdentityClassifier(classes=np.arange(n_y))\n        _score = scorer(identity_estimator, predictions, y.argmax(axis=1), **score_params)\n    else:\n        identity_estimator = _IdentityRegressor()\n        if self.alpha_per_target:\n            _score = np.array([scorer(identity_estimator, predictions[:, j], y[:, j], **score_params) for j in range(n_y)])\n        else:\n            _score = scorer(identity_estimator, predictions, y, **score_params)\n    return _score",
    "docstring": "Performs scoring with the specified scorer using the predictions and the true y values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:_score arg:self arguments arg arg arg arg arg arg If Assign Call Call Assign Call Call Assign Call If Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "CacheDataset",
    "source_code": "class CacheDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, filename, name=None):\n        self._input_dataset = input_dataset\n        self._filename = ops.convert_to_tensor(filename, dtype=dtypes.string, name='filename')\n        self._name = name\n        if tf2.enabled() and (context.executing_eagerly() or ops.inside_function()):\n            variant_tensor = gen_dataset_ops.cache_dataset_v2(input_dataset._variant_tensor, filename=self._filename, cache=gen_dataset_ops.dummy_memory_cache(), **self._common_args)\n        else:\n            variant_tensor = gen_dataset_ops.cache_dataset(input_dataset._variant_tensor, filename=self._filename, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A that caches elements of its input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\cache_op.py",
    "ast_data": "ClassDef name:CacheDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:filename arg:name arguments arg arg arg arg Assign Assign Call Assign If BoolOp Call BoolOp Call Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_ilevel",
    "source_code": "@cache_readonly\ndef _ilevel(self) -> int | None:\n    level = self.level\n    if level is None:\n        return None\n    if not isinstance(level, int):\n        index = self._index\n        if level not in index.names:\n            raise AssertionError(f'Level {level} not in index')\n        return index.names.index(level)\n    return level",
    "docstring": "If necessary, converted index level name to index level position.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\grouper.py",
    "ast_data": "FunctionDef name:_ilevel arg:self arguments arg Assign If Compare Return return:no If Call Assign If Compare Raise Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "forward_log_det_jacobian",
    "source_code": "def forward_log_det_jacobian(self, x, event_ndims, name='forward_log_det_jacobian'):\n    return self._call_forward_log_det_jacobian(x, event_ndims, name)",
    "docstring": "Returns both the forward_log_det_jacobian. Args: x: . The input to the \"forward\" Jacobian determinant evaluation. event_ndims: Number of dimensions in the probabilistic events being transformed. Must be greater than or equal to . The result is summed over the final dimensions to produce a scalar Jacobian determinant for each event, i.e. it has shape dimensions. name: The name to give this op. Returns: , if this bijector is injective. If not injective this is not implemented. Raises: TypeError: if is specified and is not . NotImplementedError: if neither nor {, } are implemented, or this is a non-injective bijector.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:forward_log_det_jacobian arg:self arg:x arg:event_ndims arg:name arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_row_partitions_for_tensor",
    "source_code": "def _row_partitions_for_tensor(value, rank, dtype):\n    shape = array_ops.shape(value, out_type=dtype)\n    return _row_partitions_for_uniform_shape(shape, rank)",
    "docstring": "Returns the row partitions for a tf.Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_row_partitions_for_tensor arg:value arg:rank arg:dtype arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "load_weights",
    "source_code": "def load_weights(mod: nn.Module, names: list[str], params: tuple[Tensor, ...]) -> None:\n    for name, p in zip(names, params):\n        _set_nested_attr(mod, name.split('.'), p)",
    "docstring": "Reload a set of weights so that can be used again to perform a forward pass. Note that the are regular Tensors (that can have history) and so are left as Tensors. This means that mod.parameters() will still be empty after this call.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\utils.py",
    "ast_data": "FunctionDef name:load_weights arg:mod arg:names arg:params arguments arg arg arg For Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_logpdf",
    "source_code": "def _logpdf(self, x, dim, df, scale, log_det_scale, C):\n    log_det_x = np.empty(x.shape[-1])\n    scale_inv_x = np.empty(x.shape)\n    tr_scale_inv_x = np.empty(x.shape[-1])\n    for i in range(x.shape[-1]):\n        _, log_det_x[i] = self._cholesky_logdet(x[:, :, i])\n        scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])\n        tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()\n    out = 0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x - (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + multigammaln(0.5 * df, dim))\n    return out",
    "docstring": "Log of the Wishart probability density function. Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triangular. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_logpdf arg:self arg:x arg:dim arg:df arg:scale arg:log_det_scale arg:C arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call For Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
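Since _logpdf is internal and does no argument checking, the public entry point is scipy.stats.wishart; a quick check of the log-density at the identity matrix:

import numpy as np
from scipy.stats import wishart

# log-density of a 2x2 Wishart(df=3, scale=I) evaluated at the identity
x = np.eye(2)
print(wishart.logpdf(x, df=3, scale=np.eye(2)))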
  {
    "library": "pytorch",
    "name": "get_heuristics",
    "source_code": "def get_heuristics(self, name: str) -> list[LearnedHeuristic]:\n    if not LearnedHeuristicController.heuristics_initialized:\n        learned_heuristics_package = 'torch._inductor.autoheuristic.artifacts'\n        base_class = LearnedHeuristic\n        found_heuristics = find_and_instantiate_subclasses(learned_heuristics_package, base_class)\n        for learned_heuristic in found_heuristics:\n            opt_name = learned_heuristic.get_name()\n            LearnedHeuristicController.existing_heuristics[opt_name].append(learned_heuristic)\n        LearnedHeuristicController.heuristics_initialized = True\n    return LearnedHeuristicController.existing_heuristics[name]",
    "docstring": "Returns a list of learned heuristics for the given optimization name.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\learned_heuristic_controller.py",
    "ast_data": "FunctionDef name:get_heuristics arg:self arg:name arguments arg arg If Assign Assign Assign Call For Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_min_max_value_by_expanding_range",
    "source_code": "def _get_min_max_value_by_expanding_range(self, start_idx: int) -> tuple[float, float]:\n    mse_min = (float('inf'), float('inf'), float('inf'))\n    left, right = (start_idx, start_idx)\n    move_left = True\n    while not (left == 0 and right == self._num_bins - 1):\n        if move_left and left > 0 or right == self._num_bins - 1:\n            left = max(left - 1, 0)\n        else:\n            right = min(right + 1, self._num_bins - 1)\n        move_left = not move_left\n        quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right])\n        mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max)\n        mse_min = min(mse_tuple, mse_min)\n    min_value, max_value = (mse_min[1], mse_min[2])\n    return (min_value, max_value)",
    "docstring": "Starting from start_idx, expand left and right alternately to find the min value of mse loss. Args: start_idx: Index to start quantization. Returns: (min_value, max_value): Min and max calculated.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "FunctionDef name:_get_min_max_value_by_expanding_range arg:self arg:start_idx arguments arg arg Assign Call Call Call Assign Assign While BoolOp Compare Compare If BoolOp BoolOp Compare Compare Assign Call Assign Call Assign Assign Assign Call Assign Call Assign Return return:yes"
  },
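A hypothetical standalone sketch of just the alternating left/right expansion order used above (indices only, no MSE computation), to make the traversal explicit:

def expansion_order(start_idx, num_bins):
    # Reproduces the while-loop's index walk: expand left and right
    # alternately until the whole histogram range is covered.
    left = right = start_idx
    move_left = True
    visited = [(left, right)]
    while not (left == 0 and right == num_bins - 1):
        if (move_left and left > 0) or right == num_bins - 1:
            left = max(left - 1, 0)
        else:
            right = min(right + 1, num_bins - 1)
        move_left = not move_left
        visited.append((left, right))
    return visited

print(expansion_order(2, 6))
# [(2, 2), (1, 2), (1, 3), (0, 3), (0, 4), (0, 5)]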
  {
    "library": "tensorflow",
    "name": "flat_map",
    "source_code": "def flat_map(self, map_func, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import flat_map_op\n    return flat_map_op._flat_map(self, map_func, name=name)",
    "docstring": "Maps across this dataset and flattens the result. The type signature is: Use if you want to make sure that the order of your dataset stays the same. For example, to flatten a dataset of batches into a dataset of their elements: >>> dataset = tf.data.Dataset.from_tensor_slices( ... [[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> dataset = dataset.flat_map(tf.data.Dataset.from_tensor_slices) >>> [a.item() for a in dataset.as_numpy_iterator()] [1, 2, 3, 4, 5, 6, 7, 8, 9] is a generalization of , since produces the same output as Args: map_func: A function mapping a dataset element to a dataset. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:flat_map arg:self arg:map_func arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "check_metadata_cacheable",
    "source_code": "def check_metadata_cacheable(metadata: ViewAndMutationMeta):\n    if config.view_replay_for_aliased_outputs:\n        for info in metadata.output_info:\n            if info.functional_tensor is not None:\n                raise BypassAOTAutogradCache('Cannot cache a graph with functional tensor')",
    "docstring": "When view replay is turned on, we bypass autograd cache if the output is aliased.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:check_metadata_cacheable arg:metadata arguments arg If For If Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "interval",
    "source_code": "@property\ndef interval(self):\n    return self._interval",
    "docstring": "The time between timer events, in milliseconds.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:interval arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "tf_record_iterator",
    "source_code": "@tf_export(v1=['io.tf_record_iterator', 'python_io.tf_record_iterator'])\n@deprecation.deprecated(date=None, instructions='Use eager execution and: \\n`tf.data.TFRecordDataset(path)`')\ndef tf_record_iterator(path, options=None):\n    compression_type = TFRecordOptions.get_compression_type_string(options)\n    return _pywrap_record_io.RecordIterator(path, compression_type)",
    "docstring": "An iterator that read the records from a TFRecords file. Args: path: The path to the TFRecords file. options: (optional) A TFRecordOptions object. Returns: An iterator of serialized TFRecords. Raises: IOError: If cannot be opened for reading.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py",
    "ast_data": "FunctionDef name:tf_record_iterator arg:path arg:options arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "_kde_support",
    "source_code": "def _kde_support(data, bw, gridsize, cut, clip):\n    support_min = max(data.min() - bw * cut, clip[0])\n    support_max = min(data.max() + bw * cut, clip[1])\n    support = np.linspace(support_min, support_max, gridsize)\n    return support",
    "docstring": "Establish support for a kernel density estimate.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:_kde_support arg:data arg:bw arg:gridsize arg:cut arg:clip arguments arg arg arg arg arg Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
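A quick numeric illustration of the support computation (values chosen arbitrarily): the grid spans the data range padded by bw * cut on each side, clipped to the given bounds.

import numpy as np

data = np.array([0.0, 1.0, 2.0])
bw, cut, clip = 0.5, 3, (-np.inf, np.inf)

support = _kde_support(data, bw, 100, cut, clip)
print(support.min(), support.max())  # -1.5 3.5: data range padded by bw * cut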
  {
    "library": "pytorch",
    "name": "element_wise_eq",
    "source_code": "@register_refinement_rule(torch.add)\n@register_refinement_rule(operator.add)\ndef element_wise_eq(n: Node):\n    res = []\n    if isinstance(n.args[0], Node) and isinstance(n.args[1], Node):\n        arg_type1 = n.args[0].type\n        arg_type2 = n.args[1].type\n        if isinstance(arg_type1, TensorType) and isinstance(arg_type2, TensorType) and isinstance(n.type, TensorType):\n            args1, args2 = broadcast_types(arg_type1, arg_type2)\n            a1 = args1.__args__\n            a2 = args2.__args__\n            a3 = n.type.__args__\n            r = []\n            for x, y, z in zip(a1, a2, a3):\n                if x == y:\n                    r.append(Equality(x, z))\n            res = r\n    return res",
    "docstring": "For element-wise operations and handles broadcasting. Note that after applying broadcasting to the arguments we are able to determine if certain dimensions have not been broadcast if they are symbolicallu equal. in this case, we can establish equality between those dimensions and the corresponding output dimensions. Note that it takes two iterations for this result. One iteration to establish equality between certain dimensions of the operands (requiring the whole solver including unification) and another iteration to establish equality between the operands and the resulting type, requiring another round of constraint generation and unificaiton.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:element_wise_eq arg:n arguments arg Assign If BoolOp Call Call Assign Assign If BoolOp Call Call Call Assign Call Assign Assign Assign Assign For Call If Compare Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "strip_quotes",
    "source_code": "def strip_quotes(table_name):\n    has_quotes = table_name.startswith('\"') and table_name.endswith('\"')\n    return table_name[1:-1] if has_quotes else table_name",
    "docstring": "Strip quotes off of quoted table names to make them safe for use in index names, sequence names, etc. For example '\"USER\".\"TABLE\"' (an Oracle naming scheme) becomes 'USER\".\"TABLE'.",
    "type": "function",
    "file_path": "django\\django\\db\\backends\\utils.py",
    "ast_data": "FunctionDef name:strip_quotes arg:table_name arguments arg Assign BoolOp Call Call Return return:yes"
  },
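The docstring's own example, as a doctest-style check in the convention this dataset already uses:

>>> strip_quotes('"USER"."TABLE"')
'USER"."TABLE'
>>> strip_quotes('unquoted')
'unquoted'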
  {
    "library": "django",
    "name": "TodayArchiveView",
    "source_code": "class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):\n    template_name_suffix = '_archive_day'",
    "docstring": "List of objects published today.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:TodayArchiveView Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_project_and_cluster",
    "source_code": "def _project_and_cluster(self, data, vectors, n_clusters):\n    projected = safe_sparse_dot(data, vectors)\n    _, labels = self._k_means(projected, n_clusters)\n    return labels",
    "docstring": "Project `` and cluster the result.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bicluster.py",
    "ast_data": "FunctionDef name:_project_and_cluster arg:self arg:data arg:vectors arg:n_clusters arguments arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "decrypt",
    "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key):\n    raise NotImplementedError",
    "docstring": "Decrypt the given cipher text. :param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7516\\models.py",
    "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key arguments arg arg arg arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "layer_name",
    "source_code": "@property\ndef layer_name(self):\n    name = capi.get_feat_name(self._layer._ldefn)\n    return force_str(name, self.encoding, strings_only=True)",
    "docstring": "Return the name of the layer for the feature.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:layer_name arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:]",
    "docstring": "A tuple indicating the shape of this RaggedTensorValue.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_value.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "get_current_site",
    "source_code": "def get_current_site(request):\n    if apps.is_installed('django.contrib.sites'):\n        from .models import Site\n        return Site.objects.get_current(request)\n    else:\n        return RequestSite(request)",
    "docstring": "Check if contrib.sites is installed and return either the current `` object based on the request.",
    "type": "function",
    "file_path": "django\\django\\contrib\\sites\\shortcuts.py",
    "ast_data": "FunctionDef name:get_current_site arg:request arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_AddAndReturnTriL",
    "source_code": "class _AddAndReturnTriL(_Adder):\n\n    def can_add(self, op1, op2):\n        types = {_type(op1), _type(op2)}\n        return not types.difference(_DIAG_LIKE.union({_TRIL}))\n\n    def _add(self, op1, op2, operator_name, hints):\n        if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:\n            op_add_to_tensor, op_other = (op1, op2)\n        else:\n            op_add_to_tensor, op_other = (op2, op1)\n        return linear_operator_lower_triangular.LinearOperatorLowerTriangular(tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()), is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name)",
    "docstring": "Handles additions resulting in a TriL operator.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "ClassDef name:_AddAndReturnTriL FunctionDef name:can_add arg:self arg:op1 arg:op2 arguments arg arg arg Assign Call Call Return return:yes Call Call FunctionDef name:_add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg If Compare Call Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "size",
    "source_code": "def size(self):\n    return self.index + 1",
    "docstring": "Returns the size of the vertex cache.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "FunctionDef name:size arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_aggregate_gradients",
    "source_code": "def _aggregate_gradients(self, grads_and_vars):\n    return self.gradient_aggregator(grads_and_vars)",
    "docstring": "Called in to aggregate gradients across devices. Note that user subclasses may override this, so the interface should not be changed. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: A list of (aggregated_gradient, variable) pairs. By default, this calls .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_aggregate_gradients arg:self arg:grads_and_vars arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_restore_from_tensors",
    "source_code": "def _restore_from_tensors(self, restored_tensors):\n    restored_tensor = restored_tensors[trackable.VARIABLE_VALUE_KEY]\n    return state_ops.assign(self, restored_tensor, validate_shape=self.get_shape().is_fully_defined())",
    "docstring": "Implements Trackable._restore_from_tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:_restore_from_tensors arg:self arg:restored_tensors arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_epoch_begin",
    "source_code": "def on_epoch_begin(self, epoch, logs=None):\n    if self.histogram_freq and epoch % self.histogram_freq == 0:\n        self.model._make_test_function()\n        if self.merged not in self.model.test_function.fetches:\n            self.model.test_function.fetches.append(self.merged)\n            self.model.test_function.fetch_callbacks[self.merged] = self._fetch_callback",
    "docstring": "Add histogram op to Model eval_function callbacks, reset batch count.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks_v1.py",
    "ast_data": "FunctionDef name:on_epoch_begin arg:self arg:epoch arg:logs arguments arg arg arg If BoolOp Compare Call If Compare Call Assign"
  },
  {
    "library": "pytorch",
    "name": "full_optim_state_dict",
    "source_code": "@staticmethod\ndef full_optim_state_dict(model: torch.nn.Module, optim: torch.optim.Optimizer, optim_input: Optional[Union[list[dict[str, Any]], Iterable[torch.nn.Parameter]]]=None, rank0_only: bool=True, group: Optional[dist.ProcessGroup]=None) -> dict[str, Any]:\n    FullyShardedDataParallel._warn_legacy_optim_state_dict('full_optim_state_dict', 'optim_state_dict', stacklevel=2)\n    return FullyShardedDataParallel._optim_state_dict_impl(model=model, optim=optim, optim_state_dict=optim.state_dict(), optim_input=optim_input, rank0_only=rank0_only, group=group, full_state_dict=True, _stacklevel=2)",
    "docstring": "Return the full optimizer state-dict. Consolidates the full optimizer state on rank 0 and returns it as a :class: following the convention of :meth:, i.e. with keys `dicttorch.optim.Optimizer.state_dictFullyShardedDataParallellistdictdicttorch.optim.Optimizer.state_dictdict`.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:full_optim_state_dict arg:model arg:optim arg:optim_input arg:rank0_only arg:group arguments arg arg arg arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_session_init",
    "source_code": "@abc.abstractmethod\ndef on_session_init(self, request):\n    pass",
    "docstring": "Callback invoked during construction of the debug-wrapper session. This is a blocking callback. The invocation happens right before the constructor ends. Args: request: () callback request carrying information such as the session being wrapped. Returns: An instance of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:on_session_init arg:self arg:request arguments arg arg"
  },
  {
    "library": "scipy",
    "name": "design_matrix",
    "source_code": "@classmethod\ndef design_matrix(cls, xvals, t, k, extrapolate=True):\n    xvals = np.asarray(xvals, dtype=float)\n    ndim = xvals.shape[-1]\n    if len(t) != ndim:\n        raise ValueError(f'Data and knots are inconsistent: len(t) = {len(t)} for  ndim = {ndim!r}.')\n    k, _indices_k1d, (_t, len_t) = _preprocess_inputs(k, t)\n    c_shape = tuple((len_t[d] - k[d] - 1 for d in range(ndim)))\n    cs = c_shape[1:] + (1,)\n    cstrides = np.cumprod(cs[::-1], dtype=np.int64)[::-1].copy()\n    data, indices, indptr = _dierckx._coloc_nd(xvals, _t, len_t, k, _indices_k1d, cstrides)\n    return csr_array((data, indices, indptr))",
    "docstring": "Construct the design matrix as a CSR format sparse array. Parameters ---------- xvals : ndarray, shape(npts, ndim) Data points. `ValueErrorxvals` and contains values of b-spline basis elements which are non-zero at this value.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_ndbspline.py",
    "ast_data": "FunctionDef name:design_matrix arg:cls arg:xvals arg:t arg:k arg:extrapolate arguments arg arg arg arg arg Assign Call Assign If Compare Call Raise Call Call Assign Call Assign Call Call Assign Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, ensure_min_features=2)\n    super()._fit(X.T)\n    self._n_features_out = self.n_clusters_\n    return self",
    "docstring": "Fit the hierarchical clustering on the data. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Returns the transformer.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_resource_inputs",
    "source_code": "def _get_resource_inputs(op):\n    reads, writes = utils.get_read_write_resource_inputs(op)\n    saturated = False\n    while not saturated:\n        saturated = True\n        for key in _acd_resource_resolvers_registry.list():\n            updated = _acd_resource_resolvers_registry.lookup(key)(op, reads, writes)\n            if updated:\n                reads = reads.difference(writes)\n            saturated = saturated and (not updated)\n    for t in reads:\n        yield (t, ResourceType.READ_ONLY)\n    for t in writes:\n        yield (t, ResourceType.READ_WRITE)",
    "docstring": "Returns an iterable of resources touched by this .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps.py",
    "ast_data": "FunctionDef name:_get_resource_inputs arg:op arguments arg Assign Call Assign While Assign For Call Assign Call Call If Assign Call Assign BoolOp For For"
  },
  {
    "library": "matplotlib",
    "name": "get_markersize",
    "source_code": "def get_markersize(self):\n    return self._markersize",
    "docstring": "Return the marker size in points. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_markersize arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "ZeroSum",
    "source_code": "class ZeroSum(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        if abs(sum(x)) < 3e-16:\n            return 0.0\n        return 1.0 + (10000.0 * abs(sum(x))) ** 0.5",
    "docstring": "ZeroSum objective function. This class defines the ZeroSum [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{ZeroSum}}(x) = \\begin{cases} 0 & \\textrm{if} \\sum_{i=1}^n x_i = 0 \\\\ 1 + \\left(10000 \\left |\\sum_{i=1}^n x_i\\right| \\right)^{0.5} & \\textrm{otherwise} \\end{cases} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: where :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_Z.py",
    "ast_data": "ClassDef name:ZeroSum Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg If Compare Call Call Return return:yes Return return:yes Call Call"
  },
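A quick evaluation sketch showing both branches of the piecewise definition, assuming the Benchmark base class from the same module is importable:

zs = ZeroSum(dimensions=2)
print(zs.fun([0.5, -0.5]))   # 0.0: the coordinates sum to zero
print(zs.fun([1.0, 1.0]))    # 1 + (10000 * 2) ** 0.5, roughly 142.42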
  {
    "library": "django",
    "name": "rollback",
    "source_code": "@async_unsafe\ndef rollback(self):\n    self.validate_thread_sharing()\n    self.validate_no_atomic_block()\n    self._rollback()\n    self.errors_occurred = False\n    self.needs_rollback = False\n    self.run_on_commit = []",
    "docstring": "Roll back a transaction and reset the dirty flag.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\base.py",
    "ast_data": "FunctionDef name:rollback arg:self arguments arg Call Call Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_dt64_to_ordinalf",
    "source_code": "def _dt64_to_ordinalf(d):\n    dseconds = d.astype('datetime64[s]')\n    extra = (d - dseconds).astype('timedelta64[ns]')\n    t0 = np.datetime64(get_epoch(), 's')\n    dt = (dseconds - t0).astype(np.float64)\n    dt += extra.astype(np.float64) / 1000000000.0\n    dt = dt / SEC_PER_DAY\n    NaT_int = np.datetime64('NaT').astype(np.int64)\n    d_int = d.astype(np.int64)\n    dt[d_int == NaT_int] = np.nan\n    return dt",
    "docstring": "Convert or an of those types to Gregorian date as UTC float relative to the epoch (see ). Roundoff is float64 precision. Practically: microseconds for dates between 290301 BC, 294241 AD, milliseconds for larger dates (see ).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:_dt64_to_ordinalf arg:d arguments arg Assign Call Assign Call Assign Call Call Assign Call Call Assign Assign Call Call Assign Call Assign Compare Return return:yes"
  },
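A minimal numpy-only sketch of the same conversion, assuming the default epoch of 1970-01-01 (get_epoch is configurable in matplotlib, and this skips the extra nanosecond-precision handling above):

import numpy as np

d = np.datetime64("1970-01-02T12:00:00")
t0 = np.datetime64("1970-01-01", "s")       # assumed epoch
dt_days = (d - t0) / np.timedelta64(1, "D")  # float days since the epoch
print(dt_days)                               # 1.5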
  {
    "library": "tensorflow",
    "name": "non_trainable_variables",
    "source_code": "@property\ndef non_trainable_variables(self):\n    global_variables = self.global_variables\n    trainable_variables = set(self.trainable_variables)\n    return [x for x in global_variables if x not in trainable_variables]",
    "docstring": "Returns the list of non-trainable variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:non_trainable_variables arg:self arguments arg Assign Assign Call Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "find_dst_rank",
    "source_code": "def find_dst_rank(self, user: fx.Node) -> Optional[int]:\n    if user.op == 'call_module':\n        return self.get_stage_index_of_submod(user.name)\n    else:\n        return None",
    "docstring": "Find the destination rank of a node. If the is not a submod, may be returned.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:find_dst_rank arg:self arg:user arguments arg arg If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "is_devrelease",
    "source_code": "@property\ndef is_devrelease(self) -> bool:\n    return self.dev is not None",
    "docstring": "Whether this version is a development release. >>> Version(\"1.2.3\").is_devrelease False >>> Version(\"1.2.3.dev1\").is_devrelease True",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:is_devrelease arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "translate",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef translate(self, table):\n    result = self._data.array._str_translate(table)\n    dtype = object if self._data.dtype == 'object' else None\n    return self._wrap_result(result, dtype=dtype)",
    "docstring": "Map all characters in the string through the given mapping table. This method is equivalent to the standard :meth: method for strings. It maps each character in the string to a new character according to the translation table provided. Unmapped characters are left unchanged, while characters mapped to None are removed. Parameters ---------- table : dict Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or None. Unmapped characters are left untouched. Characters mapped to None are deleted. :meth: is a helper function for making translation tables. Returns ------- Series or Index A new Series or Index with translated strings. See Also -------- Series.str.replace : Replace occurrences of pattern/regex in the Series with some other string. Index.str.replace : Replace occurrences of pattern/regex in the Index with some other string. Examples -------- >>> ser = pd.Series([\"El niño\", \"Françoise\"]) >>> mytable = str.maketrans({\"ñ\": \"n\", \"ç\": \"c\"}) >>> ser.str.translate(mytable) 0 El nino 1 Francoise dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:translate arg:self arg:table arguments arg arg Assign Call Assign Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_signature_checker",
    "source_code": "def _make_signature_checker(api_signature, signature):\n    if not (isinstance(signature, dict) and all((isinstance(k, (str, int)) for k in signature))):\n        raise TypeError('signatures must be dictionaries mapping parameter names to type annotations.')\n    checkers = []\n    param_names = list(api_signature.parameters)\n    for param_name, param_type in signature.items():\n        if isinstance(param_name, int) and param_name < len(api_signature.parameters):\n            param_name = list(api_signature.parameters.values())[param_name].name\n        param = api_signature.parameters.get(param_name, None)\n        if param is None:\n            raise ValueError(f'signature includes annotation for unknown parameter {param_name!r}.')\n        if param.kind not in (tf_inspect.Parameter.POSITIONAL_ONLY, tf_inspect.Parameter.POSITIONAL_OR_KEYWORD):\n            raise ValueError(f\"Dispatch currently only supports type annotations for positional parameters; can't handle annotation for {param.kind!r} parameter {param_name}.\")\n        checker = make_type_checker(param_type)\n        index = param_names.index(param_name)\n        checkers.append((index, checker))\n    return _api_dispatcher.PySignatureChecker(checkers)",
    "docstring": "Builds a PySignatureChecker for the given type signature. Args: api_signature: The of the API whose signature is being checked. signature: Dictionary mapping parameter names to type annotations. Returns: A .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_make_signature_checker arg:api_signature arg:signature arguments arg arg If BoolOp Call Call Call Raise Call Assign Assign Call For Call If BoolOp Call Compare Call Assign Call Call Assign Call If Compare Raise Call If Compare Raise Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_convert_to_alignment",
    "source_code": "@classmethod\ndef _convert_to_alignment(cls, alignment_dict):\n    from openpyxl.styles import Alignment\n    return Alignment(**alignment_dict)",
    "docstring": "Convert `` to an openpyxl v2 Alignment object. Parameters ---------- alignment_dict : dict A dict with zero or more of the following keys (or their synonyms). 'horizontal' 'vertical' 'text_rotation' 'wrap_text' 'shrink_to_fit' 'indent' Returns ------- alignment : openpyxl.styles.Alignment",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_openpyxl.py",
    "ast_data": "FunctionDef name:_convert_to_alignment arg:cls arg:alignment_dict arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_deco_axis_expand",
    "source_code": "def _deco_axis_expand(func):\n\n    @functools.wraps(func)\n    def wrapped(a, axis=None, *args, **kwds):\n        if axis is not None:\n            axis = _util.normalize_axis_tuple(axis, a.ndim)\n        if axis == ():\n            newshape = _util.expand_shape(a.shape, axis=0)\n            a = a.reshape(newshape)\n            axis = (0,)\n        return func(a, axis, *args, **kwds)\n    return wrapped",
    "docstring": "Generically handle axis arguments in reductions. axis is *always* the 2nd arg in the function so no need to have a look at its signature",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_reductions_impl.py",
    "ast_data": "FunctionDef name:_deco_axis_expand arg:func arguments arg FunctionDef name:wrapped arg:a arg:axis arguments arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call Call Return return:yes"
  },
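A simplified standalone sketch of the axis=() trick (without the _util normalization): an empty axis tuple is turned into a reduction over a fresh length-1 dimension, so the call becomes a no-op instead of a full reduction.

import functools
import numpy as np

def axis_expand(func):
    # Same idea as _deco_axis_expand: treat axis=() by prepending a
    # length-1 dimension and reducing over it.
    @functools.wraps(func)
    def wrapped(a, axis=None, *args, **kwds):
        if axis == ():
            a = a[np.newaxis, ...]
            axis = (0,)
        return func(a, axis, *args, **kwds)
    return wrapped

@axis_expand
def my_sum(a, axis):
    return np.sum(a, axis=axis)

a = np.arange(6).reshape(2, 3)
print(my_sum(a, axis=()))   # identical to `a`: no-op reduction
print(my_sum(a, axis=0))    # ordinary reduction over axis 0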
  {
    "library": "pytorch",
    "name": "pointless_cumsum_replacement",
    "source_code": "@register_graph_pattern(CallFunction(aten.cumsum.default, CallFunction(torch.ops.aten.full.default, KeywordArg('shape'), KeywordArg('fill_value'), dtype=KeywordArg('dtype'), layout=Ignored(), device=KeywordArg('device'), pin_memory=False, _users=MULTIPLE), KeywordArg('dim'), _users=MULTIPLE), pass_dict=pass_patterns[1])\ndef pointless_cumsum_replacement(match: Match, shape, fill_value, device, dtype, dim):\n    if is_integer_dtype(dtype) or is_boolean_dtype(dtype):\n        dtype = torch.int64\n\n    def repl(*shape):\n        dim_size = shape[dim]\n        idx = torch.arange(1, dim_size + 1, device=device, dtype=dtype)\n        inter_shape = [1] * len(shape)\n        inter_shape[dim] = dim_size\n        return (idx * fill_value).view(inter_shape).expand(shape)\n    match.nodes = [match.output_node()]\n    match.replace_by_example(repl, list(shape))",
    "docstring": "Based on a pattern in OPTForCausalLM",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:pointless_cumsum_replacement arg:match arg:shape arg:fill_value arg:device arg:dtype arg:dim arguments arg arg arg arg arg arg If BoolOp Call Call Assign FunctionDef name:repl arguments arg Assign Assign Call Assign Call Assign Return return:yes Call Call Assign Call Call Call Call Call Call Call Call Call Call Call Call"
  },
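The identity this pattern exploits, checked numerically: a cumsum over a constant-filled tensor is just an arange scaled by the fill value (shapes and values here are arbitrary):

import torch

shape, fill_value, dim = (3, 4), 2.0, 1
full = torch.full(shape, fill_value)

expected = torch.cumsum(full, dim=dim)

idx = torch.arange(1, shape[dim] + 1, dtype=full.dtype)
inter_shape = [1] * len(shape)
inter_shape[dim] = shape[dim]
replacement = (idx * fill_value).view(inter_shape).expand(shape)

assert torch.equal(expected, replacement)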
  {
    "library": "tensorflow",
    "name": "flat_outputs",
    "source_code": "@property\ndef flat_outputs(self) -> List[trace.TraceType]:\n    if not hasattr(self, '_cached_flat_outputs'):\n        if self.output is not None:\n            self._cached_flat_outputs = self.output.flatten()\n    return self._cached_flat_outputs",
    "docstring": "Flat tensor outputs returned by this FunctionType.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:flat_outputs arg:self arguments arg If Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "def get_lr(self) -> list[float]:\n    raise NotImplementedError",
    "docstring": "Compute learning rate using chainable form of the scheduler.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Raise"
  },
  {
    "library": "scipy",
    "name": "fs",
    "source_code": "@fs.setter\ndef fs(self, v: float):\n    if not v > 0:\n        raise ValueError(f'Sampling frequency fs={v} must be positive!')\n    self._fs = v",
    "docstring": "Sampling frequency of input signal and of the window. The sampling frequency is the inverse of the sampling interval . A `` is raised if it is set to a non-positive value.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:fs arg:self arg:v arguments arg arg If Compare Raise Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, pad=0.3, rounding_size=None):\n    self.pad = pad\n    self.rounding_size = rounding_size",
    "docstring": "Parameters ---------- pad : float, default: 0.3 The amount of padding around the original box. rounding_size : float, default: *pad* Radius of the corners.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pad arg:rounding_size arguments arg arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_text",
    "source_code": "def set_text(self, s):\n    s = '' if s is None else str(s)\n    if s != self._text:\n        self._text = s\n        self.stale = True",
    "docstring": "Set the text string *s*. It may contain newlines (`str` which is converted to an empty string.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:set_text arg:self arg:s arguments arg arg Assign Compare Call If Compare Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_shift_right_logical_helper",
    "source_code": "def _shift_right_logical_helper(x, y, name=None):\n    assert y.dtype == x.dtype\n    dtype = x.dtype\n    signed = dtype in _SIGNED_TO_UNSIGNED_TABLE\n    if signed:\n        unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]\n        x = math_ops.cast(x, unsigned_dtype)\n        y = math_ops.cast(y, unsigned_dtype)\n    output = bitwise_ops.right_shift(x, y, name=name)\n    if signed:\n        output = math_ops.cast(output, dtype)\n    return output",
    "docstring": "Performs an integer right logical shift irrespective of input type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:_shift_right_logical_helper arg:x arg:y arg:name arguments arg arg arg Compare Assign Assign Compare If Assign Assign Call Assign Call Assign Call If Assign Call Return return:yes"
  },
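The signed-to-unsigned reinterpretation trick in isolation, sketched with numpy instead of the TF ops (for same-width integer types, a view reinterprets the bits the same way the modular casts above effectively do):

import numpy as np

x = np.array([-8], dtype=np.int32)

arithmetic = x >> 1                                        # sign-extends: [-4]
logical = (x.view(np.uint32) >> np.uint32(1)).view(np.int32)

print(arithmetic, logical)  # [-4] [2147483644]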
  {
    "library": "numpy",
    "name": "paths",
    "source_code": "def paths(self, *paths, **kws):\n    include_non_existing = kws.get('include_non_existing', True)\n    return gpaths(paths, local_path=self.local_path, include_non_existing=include_non_existing)",
    "docstring": "Apply glob to paths and prepend local_path if needed. Applies glob.glob(...) to each path in the sequence (if needed) and prepends the local_path if needed. Because this is called on all source lists, this allows wildcard characters to be specified in lists of sources for extension modules and libraries and scripts and allows path-names be relative to the source directory.",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:paths arg:self arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "list",
    "source_code": "def list(self, ignore_patterns):\n    raise NotImplementedError('subclasses of BaseFinder must provide a list() method')",
    "docstring": "Given an optional list of paths to ignore, return a two item iterable consisting of the relative path and storage instance.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:list arg:self arg:ignore_patterns arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "has_barrier",
    "source_code": "@property\ndef has_barrier(self):\n    return self._worker_barrier is not None",
    "docstring": "Whether the barrier is set or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:has_barrier arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "ExponentialWithSoftplusRate",
    "source_code": "class ExponentialWithSoftplusRate(Exponential):\n\n    @deprecation.deprecated('2019-01-01', 'Use `tfd.Exponential(tf.nn.softplus(rate)).', warn_once=True)\n    def __init__(self, rate, validate_args=False, allow_nan_stats=True, name='ExponentialWithSoftplusRate'):\n        parameters = dict(locals())\n        with ops.name_scope(name, values=[rate]) as name:\n            super(ExponentialWithSoftplusRate, self).__init__(rate=nn.softplus(rate, name='softplus_rate'), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=name)\n        self._parameters = parameters",
    "docstring": "Exponential with softplus transform on .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\exponential.py",
    "ast_data": "ClassDef name:ExponentialWithSoftplusRate FunctionDef name:__init__ arg:self arg:rate arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg Assign Call Call With Call Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "connect_to_remote_host",
    "source_code": "@tf_export('config.experimental_connect_to_host')\ndef connect_to_remote_host(remote_host=None, job_name='worker'):\n    if not remote_host:\n        raise ValueError('Must provide at least one remote_host')\n    remote_hosts = nest.flatten(remote_host)\n    cluster_spec = server_lib.ClusterSpec({job_name: [_strip_prefix(host, _GRPC_PREFIX) for host in remote_hosts]})\n    connect_to_cluster(cluster_spec)",
    "docstring": "Connects to a single machine to enable remote execution on it. Will make devices on the remote host available to use. Note that calling this more than once will work, but will invalidate any tensor handles on the old remote devices. Using the default job_name of worker, you can schedule ops to run remotely as follows: Args: remote_host: a single or a list the remote server addr in host-port format. job_name: The job name under which the new server will be accessible. Raises: ValueError: if remote_host is None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\remote.py",
    "ast_data": "FunctionDef name:connect_to_remote_host arg:remote_host arg:job_name arguments arg arg If Raise Call Assign Call Assign Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_drawstyle",
    "source_code": "def set_drawstyle(self, drawstyle):\n    if drawstyle is None:\n        drawstyle = 'default'\n    _api.check_in_list(self.drawStyles, drawstyle=drawstyle)\n    if self._drawstyle != drawstyle:\n        self.stale = True\n        self._invalidx = True\n    self._drawstyle = drawstyle",
    "docstring": "Set the drawstyle of the plot. The drawstyle determines how the points are connected. Parameters ---------- drawstyle : {'default', 'steps', 'steps-pre', 'steps-mid', 'steps-post'}, default: 'default' For 'default', the points are connected with straight lines. The steps variants connect the points with step-like lines, i.e. horizontal lines with vertical steps. They differ in the location of the step: - 'steps-pre': The step is at the beginning of the line segment, i.e. the line will be at the y-value of point to the right. - 'steps-mid': The step is halfway between the points. - 'steps-post: The step is at the end of the line segment, i.e. the line will be at the y-value of the point to the left. - 'steps' is equal to 'steps-pre' and is maintained for backward-compatibility. For examples see :doc:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_drawstyle arg:self arg:drawstyle arguments arg arg If Compare Assign Call If Compare Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "RemoteModule",
    "source_code": "class RemoteModule(_RemoteModule):\n\n    def __init__(self, remote_device: str, module_cls: type[nn.Module], args: Optional[tuple]=None, kwargs: Optional[dict[str, Any]]=None):\n        super().__init__(remote_device, module_cls, args, kwargs)",
    "docstring": "A RemoteModule instance can only be created after RPC initialization. It creates a user-specified module on a specified remote node. It behaves like a regular `~nn.ModuleDistributedDataParallel tutorial `__.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\nn\\api\\remote_module.py",
    "ast_data": "ClassDef name:RemoteModule FunctionDef name:__init__ arg:self arg:remote_device arg:module_cls arg:args arg:kwargs arguments arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_extend_op_single",
    "source_code": "def _extend_op_single(value, leaf_op, empty_st_op=None):\n\n    def to_list_op(element_op):\n        if element_op is None:\n            return None\n\n        def list_op(values):\n            [value] = values\n            return element_op(value)\n        return list_op\n    return _extend_op([value], to_list_op(leaf_op), to_list_op(empty_st_op))",
    "docstring": "Extend an op to a value instead of a list of values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:_extend_op_single arg:value arg:leaf_op arg:empty_st_op arguments arg arg arg FunctionDef name:to_list_op arg:element_op arguments arg If Compare Return return:no FunctionDef name:list_op arg:values arguments arg Assign Return return:yes Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_codegen",
    "source_code": "def _get_codegen(in_spec: pytree.TreeSpec, out_spec: Optional[pytree.TreeSpec], forward_arg_names: Optional[list[str]]=None) -> _PyTreeCodeGen:\n    if forward_arg_names:\n        names = forward_arg_names\n    elif in_spec.type == tuple and in_spec.num_children == 2 and (in_spec.children_specs[0].type == tuple) and (in_spec.children_specs[1].type == dict):\n        names = [f'arg_{i}' for i in range(in_spec.children_specs[0].num_children)]\n        names.extend(in_spec.children_specs[1].context)\n    else:\n        names = [f'arg_{i}' for i in range(in_spec.num_children)]\n    return _PyTreeCodeGen(_PyTreeInfo(names, in_spec, out_spec))",
    "docstring": "Create the codegen for the graph module based on the in/out specs",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_unlift.py",
    "ast_data": "FunctionDef name:_get_codegen arg:in_spec arg:out_spec arg:forward_arg_names arguments arg arg arg If Assign If BoolOp Compare Compare Compare Compare Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "smallest_subnormal",
    "source_code": "@property\ndef smallest_subnormal(self):\n    value = self._smallest_subnormal\n    if self.ftype(0) == value:\n        warnings.warn(f'The value of the smallest subnormal for {self.ftype} type is zero.', UserWarning, stacklevel=2)\n    return self._float_to_float(value)",
    "docstring": "Return the value for the smallest subnormal. Returns ------- smallest_subnormal : float value for the smallest subnormal. Warns ----- UserWarning If the calculated value for the smallest subnormal is zero.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\getlimits.py",
    "ast_data": "FunctionDef name:smallest_subnormal arg:self arguments arg Assign If Compare Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return tuple((self[i].tuple for i in range(self.geom_count)))",
    "docstring": "Return a tuple representation of this Geometry Collection.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "strategy",
    "source_code": "@property\ndef strategy(self):\n    return self._strategy",
    "docstring": "The current object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:strategy arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get",
    "source_code": "def get(self):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Wait for the result of and return the tensor result. This makes the value concrete by copying the remote tensor to local. Returns: The actual output (in the form of s) of the associated with this , previously returned by a call. This can be a single Tensor, or a structure of Tensors, depending on the output of the . Raises: tf.errors.CancelledError: If the function that produces this is aborted or cancelled due to failure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\remote_value.py",
    "ast_data": "FunctionDef name:get arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_gru_weights",
    "source_code": "def convert_gru_weights(weights, from_cudnn=True):\n    kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)\n    recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)\n    biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)\n    return [kernels, recurrent_kernels, biases]",
    "docstring": "Converts the weights between CuDNNGRU and GRU. Args: weights: Original weights. from_cudnn: Indicates whether original weights are from CuDNN layer. Returns: Updated weights compatible with GRU.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py",
    "ast_data": "FunctionDef name:convert_gru_weights arg:weights arg:from_cudnn arguments arg arg Assign Call Call Assign Call arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_label",
    "source_code": "def add_label(self, x, y, rotation, lev, cvalue):\n    data_x, data_y = self.axes.transData.inverted().transform((x, y))\n    t = Text(data_x, data_y, text=self.get_text(lev, self.labelFmt), rotation=rotation, horizontalalignment='center', verticalalignment='center', zorder=self._clabel_zorder, color=self.labelMappable.to_rgba(cvalue, alpha=self.get_alpha()), fontproperties=self._label_font_props, clip_box=self.axes.bbox)\n    if self._use_clabeltext:\n        data_rotation, = self.axes.transData.inverted().transform_angles([rotation], [[x, y]])\n        t.set(rotation=data_rotation, transform_rotates_text=True)\n    self.labelTexts.append(t)\n    self.labelCValues.append(cvalue)\n    self.labelXYs.append((x, y))\n    self.axes.add_artist(t)",
    "docstring": "Add a contour label, respecting whether *use_clabeltext* was set.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:add_label arg:self arg:x arg:y arg:rotation arg:lev arg:cvalue arguments arg arg arg arg arg arg Assign Call Call Assign Call Call Call Call If Assign Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "is_gray",
    "source_code": "def is_gray(self):\n    if not self._isinit:\n        self._init()\n    return np.all(self._lut[:, 0] == self._lut[:, 1]) and np.all(self._lut[:, 0] == self._lut[:, 2])",
    "docstring": "Return whether the colormap is grayscale.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:is_gray arg:self arguments arg If Call Return return:yes BoolOp Call Compare Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "test_on_batch",
    "source_code": "def test_on_batch(model, inputs, targets, sample_weights=None, output_loss_metrics=None):\n    inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model)\n    with backend.eager_learning_phase_scope(0):\n        outs, total_loss, output_losses, masks = _model_loss(model, inputs, targets, sample_weights=sample_weights, training=False, output_loss_metrics=output_loss_metrics)\n    if not isinstance(outs, list):\n        outs = [outs]\n    metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights=sample_weights, masks=masks)\n    total_loss = nest.flatten(total_loss)\n    return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}",
    "docstring": "Calculates the loss for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. Returns: Dict with three items: 'total_loss': single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_eager_v1.py",
    "ast_data": "FunctionDef name:test_on_batch arg:model arg:inputs arg:targets arg:sample_weights arg:output_loss_metrics arguments arg arg arg arg arg Assign Call With Call Assign Call If Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "srid",
    "source_code": "@srid.setter\ndef srid(self, value):\n    self.srs = value",
    "docstring": "Shortcut to set this GDALRaster's srs from an srid.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:srid arg:self arg:value arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_fftn_c2c",
    "source_code": "def _fftn_c2c(function_name: str, input: TensorLikeType, shape: tuple[int, ...], dim: tuple[int, ...], norm: NormType, forward: bool) -> TensorLikeType:\n    torch._check(input.dtype.is_complex, lambda: f'{function_name} expects a complex input tensor, but got {input.dtype}')\n    x = _resize_fft_input(input, dim, shape)\n    output = prims.fft_c2c(x, dim=dim, forward=forward)\n    return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward)",
    "docstring": "Common code for n-dimensional complex to complex FFTs (fftn or ifftn)",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_fftn_c2c arg:function_name arg:input arg:shape arg:dim arg:norm arg:forward arguments arg arg arg arg arg arg Call arguments Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "patfilter",
    "source_code": "def patfilter(names: Iterable[str], pat: str) -> list[str]:\n    if pat not in _pat_cache:\n        _pat_cache[pat] = re.compile(_translate_pattern(pat))\n    match = _pat_cache[pat].match\n    return list(filter(match, names))",
    "docstring": "Return the subset of the list ``. Adapted from fnmatch module.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\matching.py",
    "ast_data": "FunctionDef name:patfilter arg:names arg:pat arguments arg arg If Compare Assign Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_as_ref",
    "source_code": "@property\ndef _as_ref(self):\n    if self._is_ref_dtype:\n        return self\n    else:\n        return _INTERN_TABLE[self._type_enum + 100]",
    "docstring": "Returns a reference based on this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:_as_ref arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, image_embedding: Tensor, image_pe: Tensor, point_embedding: Tensor) -> tuple[Tensor, Tensor]:\n    bs, c, h, w = image_embedding.shape\n    image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n    image_pe = image_pe.flatten(2).permute(0, 2, 1)\n    queries = point_embedding\n    keys = image_embedding\n    for layer in self.layers:\n        queries, keys = layer(queries=queries, keys=keys, query_pe=point_embedding, key_pe=image_pe)\n    q = queries + point_embedding\n    k = keys + image_pe\n    attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n    queries = queries + attn_out\n    queries = self.norm_final_attn(queries)\n    return (queries, keys)",
    "docstring": "Run forward. Args: image_embedding: image to attend to. Should be shape B x embedding_dim x h x w for any h and w. image_pe: the positional encoding to add to the image. Must have the same shape as image_embedding. point_embedding: the embedding to add to the query points. Must have shape B x N_points x embedding_dim for any N_points. Returns: - the processed point_embedding - the processed image_embedding",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\transformer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:image_embedding arg:image_pe arg:point_embedding arguments arg arg arg arg Assign Assign Call Call Assign Call Call Assign Assign For Assign Call Assign Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated(None, 'use distribute.MultiWorkerMirroredStrategy instead')\ndef __init__(self, communication=collective_util.CommunicationImplementation.AUTO, cluster_resolver=None):\n    communication_options = collective_util.Options(implementation=communication)\n    super(_CollectiveAllReduceStrategyExperimental, self).__init__(cluster_resolver, communication_options)",
    "docstring": "Creates the strategy. Args: communication: optional . This is a hint on the preferred collective communication implementation. Possible values include , , and . cluster_resolver: optional . If , is used.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:communication arg:cluster_resolver arguments arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sharded_variable_creator",
    "source_code": "def sharded_variable_creator(next_creator: Callable[..., tf_variables.Variable], *args, **kwargs):\n    kwargs['skip_mirrored_creator'] = True\n    num_hosts = len(hosts)\n    name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs)\n    initial_value = kwargs['initial_value']\n    rows = shape[0]\n    cols = shape[1]\n    partial_partition = rows % num_hosts\n    full_rows_per_host = rows // num_hosts\n    partitions = [full_rows_per_host + 1] * partial_partition + [full_rows_per_host] * (num_hosts - partial_partition)\n    variables = []\n    sharding_aware = 'shard_info' in tf_inspect.getargspec(initial_value).args\n    offset = 0\n    kwargs['dtype'] = dtype\n    for i, p in enumerate(partitions):\n        if p == 0:\n            continue\n        with ops.device(hosts[i]):\n            kwargs['name'] = '{}_{}'.format(name, i)\n            kwargs['shape'] = (p, cols)\n            if sharding_aware:\n                shard_info = base.ShardInfo(kwargs['shape'], (offset, 0))\n                kwargs['initial_value'] = functools.partial(initial_value, shard_info=shard_info)\n                offset += p\n            else:\n                kwargs['initial_value'] = functools.partial(unwrapped_initial_value, kwargs['shape'], dtype=dtype)\n            variables.append(next_creator(*args, **kwargs))\n    return TPUEmbeddingVariable(variables, name=name)",
    "docstring": "The sharded variable creator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:sharded_variable_creator arg:next_creator arguments arg arg arg Assign Assign Call Assign Call Assign Assign Assign Assign Assign Assign Assign Assign Compare Call Assign Assign For Call If Compare With Call Assign Call Assign If Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__bool__",
    "source_code": "def __bool__(self):\n    return True",
    "docstring": "Return True since all formsets have a management form which is not included in the length.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:__bool__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "build_container",
    "source_code": "def build_container(self, outname: str='META-INF/container.xml') -> None:\n    logger.info(__('writing META-INF/container.xml file...'))\n    outdir = self.outdir / 'META-INF'\n    ensuredir(outdir)\n    copyfile(self.template_dir / 'container.xml', outdir / 'container.xml', force=True)",
    "docstring": "Write the metainfo file META-INF/container.xml.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\_epub_base.py",
    "ast_data": "FunctionDef name:build_container arg:self arg:outname arguments arg arg Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_step_for_v2",
    "source_code": "def _get_step_for_v2():\n    step = _summary_ops_v2.get_step()\n    if step is not None:\n        return step\n    return _training_util.get_global_step()",
    "docstring": "Get step for v2 summary invocation in v1. In order to invoke v2 op in , global step needs to be set for the v2 summary writer. Returns: The step set by or , or None is no step has been set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:_get_step_for_v2 arguments Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "remove_toolitem",
    "source_code": "def remove_toolitem(self, name):\n    raise NotImplementedError",
    "docstring": "A hook to remove a toolitem from the container. This hook must be implemented in each backend and contains the backend-specific code to remove an element from the toolbar; it is called when emits a `.ToolManagerToolContainerBase.ToolManager.remove_tool`. Parameters ---------- name : str Name of the tool to remove.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:remove_toolitem arg:self arg:name arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "initialize",
    "source_code": "def initialize(self, table):\n    check_table_dtypes(table, self._keys.dtype, self._values.dtype)\n    with ops.name_scope(self._name, values=(table.resource_handle, self._keys, self._values)):\n        init_op = gen_lookup_ops.lookup_table_import_v2(table.resource_handle, self._keys, self._values)\n    ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)\n    return init_op",
    "docstring": "Initializes the given with and tensors. Args: table: The table to initialize. Returns: The operation that initializes the table. Raises: TypeError: when the keys and values data types do not match the table key and value data types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:initialize arg:self arg:table arguments arg arg Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_mpl_coords",
    "source_code": "def _mpl_coords(self, event=None):\n    if event is None:\n        window = self.get_window()\n        t, x, y, state = window.get_device_position(window.get_display().get_device_manager().get_client_pointer())\n    else:\n        x, y = (event.x, event.y)\n    x = x * self.device_pixel_ratio\n    y = self.figure.bbox.height - y * self.device_pixel_ratio\n    return (x, y)",
    "docstring": "Convert the position of a GTK event, or of the current cursor position if *event* is None, to Matplotlib coordinates. GTK use logical pixels, but the figure is scaled to physical pixels for rendering. Transform to physical pixels so that all of the down-stream transforms work as expected. Also, the origin is different and needs to be corrected.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_gtk3.py",
    "ast_data": "FunctionDef name:_mpl_coords arg:self arg:event arguments arg arg If Compare Assign Call Assign Call Call Call Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ArgsKwargsPair",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass ArgsKwargsPair(NamedTuple):\n    args: tuple[Any, ...]\n    kwargs: dict[str, Any]",
    "docstring": "Simple named tuple for wrapping args/kwargs pairs.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\operator_schemas.py",
    "ast_data": "ClassDef name:ArgsKwargsPair Call"
  },
  {
    "library": "kornia",
    "name": "get_laf_scale",
    "source_code": "def get_laf_scale(LAF: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    eps = 1e-10\n    out = LAF[..., 0:1, 0:1] * LAF[..., 1:2, 1:2] - LAF[..., 1:2, 0:1] * LAF[..., 0:1, 1:2] + eps\n    return out.abs().sqrt()",
    "docstring": "Return a scale of the LAFs. Args: LAF: :math: Returns: scale :math: Example: >>> input = torch.ones(1, 5, 2, 3) # BxNx2x3 >>> output = get_laf_scale(input) # BxNx1x1",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:get_laf_scale arg:LAF arguments arg Call Assign Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "allow_rasterization",
    "source_code": "def allow_rasterization(draw):\n\n    @wraps(draw)\n    def draw_wrapper(artist, renderer):\n        try:\n            if artist.get_rasterized():\n                if renderer._raster_depth == 0 and (not renderer._rasterizing):\n                    renderer.start_rasterizing()\n                    renderer._rasterizing = True\n                renderer._raster_depth += 1\n            elif renderer._raster_depth == 0 and renderer._rasterizing:\n                renderer.stop_rasterizing()\n                renderer._rasterizing = False\n            if artist.get_agg_filter() is not None:\n                renderer.start_filter()\n            return draw(artist, renderer)\n        finally:\n            if artist.get_agg_filter() is not None:\n                renderer.stop_filter(artist.get_agg_filter())\n            if artist.get_rasterized():\n                renderer._raster_depth -= 1\n            if renderer._rasterizing and (fig := artist.get_figure(root=True)) and fig.suppressComposite:\n                renderer.stop_rasterizing()\n                renderer.start_rasterizing()\n    draw_wrapper._supports_rasterization = True\n    return draw_wrapper",
    "docstring": "Decorator for Artist.draw method. Provides routines that run before and after the draw call. The before and after functions are useful for changing artist-dependent renderer attributes or making other setup function calls, such as starting and flushing a mixed-mode renderer.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:allow_rasterization arg:draw arguments arg FunctionDef name:draw_wrapper arg:artist arg:renderer arguments arg arg Try If Call If BoolOp Compare Call Assign If BoolOp Compare Call Assign If Compare Call Call Return return:yes Call If Compare Call Call Call If Call If BoolOp Call Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "smooth_l1_loss",
    "source_code": "@elementwise_type_promotion_wrapper(type_promoting_args=('input', 'target'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT)\ndef smooth_l1_loss(input: TensorLikeType, target: TensorLikeType, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean', beta: float=1.0) -> TensorLikeType:\n    if size_average is not None or reduce is not None:\n        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)\n    _check_reduction_value(reduction)\n    if beta == 0.0:\n        return torch.nn.functional.l1_loss(input, target, size_average=size_average, reduce=reduce, reduction=reduction)\n    else:\n        loss = torch.abs(input - target)\n        loss = torch.where(loss < beta, 0.5 * loss ** 2 / beta, loss - 0.5 * beta)\n        return _apply_loss_reduction(loss, reduction)",
    "docstring": "Reference implementation of torch.nn.functional.smooth_l1_loss",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:smooth_l1_loss arg:input arg:target arg:size_average arg:reduce arg:reduction arg:beta arguments arg arg arg arg arg arg If BoolOp Compare Compare Assign Call Call If Compare Return return:yes Call Assign Call Assign Call Compare Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_save",
    "source_code": "def _save(input_dataset, path, compression=None, shard_func=None, checkpoint_args=None):\n    if context.executing_eagerly() and checkpoint_args:\n        save_dataset = _SaveDataset(input_dataset, path, shard_func, compression)\n        save_iterator = iter(save_dataset)\n        if 'checkpoint' in checkpoint_args:\n            raise ValueError(\"'Invalid `checkpoint_args`. `checkpoint_args` are not allowed to include 'checkpoint'.\")\n        checkpoint = checkpoint_lib.Checkpoint(iterator=save_iterator)\n        checkpoint_args['checkpoint'] = checkpoint\n        manager = checkpoint_management.CheckpointManager(**checkpoint_args)\n        checkpoint.restore(manager.latest_checkpoint)\n        for _ in enumerate(save_iterator):\n            if 'step_counter' in checkpoint_args:\n                checkpoint_args['step_counter'].assign_add(delta=1)\n            manager.save(check_interval=True)\n    else:\n        dataset, shard_func, use_shard_func, path = set_save_dataset_attributes(input_dataset, shard_func, path)\n        return ged_ops.save_dataset(dataset._variant_tensor, path=path, shard_func_other_args=shard_func.captured_inputs, compression=compression, shard_func=shard_func, use_shard_func=use_shard_func)",
    "docstring": "Implements the save function and checkpoint functionality.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\save_op.py",
    "ast_data": "FunctionDef name:_save arg:input_dataset arg:path arg:compression arg:shard_func arg:checkpoint_args arguments arg arg arg arg arg If BoolOp Call Assign Call Assign Call If Compare Raise Call Assign Call Assign Assign Call Call For Call If Compare Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_left_axes",
    "source_code": "@property\ndef _left_axes(self):\n    if self._col_wrap is None:\n        return self.axes[:, 0].flat\n    else:\n        axes = []\n        for i, ax in enumerate(self.axes):\n            if not i % self._ncol:\n                axes.append(ax)\n        return np.array(axes, object).flat",
    "docstring": "Return a flat array of the left column of axes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_left_axes arg:self arguments arg If Compare Return return:yes Assign For Call If Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "wrapper",
    "source_code": "def wrapper(*args, **kwargs):\n    layer = call_collection.layer\n    training = None\n    inputs = _filtered_inputs([args, kwargs])\n    if (args or kwargs) and call_collection.training_arg_was_passed(args, kwargs):\n        training = call_collection.get_training_arg_value(args, kwargs)\n    original_losses = _reset_layer_losses(layer)\n    with base_layer_utils.call_context().enter(layer, inputs=inputs, build_graph=False, training=training, saving=True):\n        with autocast_variable.enable_auto_cast_variables(layer._compute_dtype_object):\n            ret = method(*args, **kwargs)\n    _restore_layer_losses(original_losses)\n    return ret",
    "docstring": "Calls method within call context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:wrapper arguments arg arg Assign Assign Assign Call If BoolOp BoolOp Call Assign Call Assign Call With Call Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Contrast",
    "source_code": "class Contrast(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.5, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.2, 1.8), temperature: float=0.1, symmetric_megnitude: bool=False) -> None:\n        super().__init__(K.RandomContrast(magnitude_range, same_on_batch=False, p=initial_probability), initial_magnitude=[('contrast_factor', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply contrast operation. Args: initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. initial_magnitude: the initial magnitude. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:Contrast FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_nontrivial_guards",
    "source_code": "def get_nontrivial_guards(self) -> list[SympyBoolean]:\n    return [self.simplify(guard.expr) for guard in self.guards if self._maybe_evaluate_static(guard.expr, axioms=(), size_oblivious=guard.size_oblivious) is None]",
    "docstring": "Returns a list of guard expressions that aren't statically known (i.e. not trivial)",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get_nontrivial_guards arg:self arguments arg Return return:yes Call Compare Call"
  },
  {
    "library": "numpy",
    "name": "hermemulx",
    "source_code": "def hermemulx(c):\n    [c] = pu.as_series([c])\n    if len(c) == 1 and c[0] == 0:\n        return c\n    prd = np.empty(len(c) + 1, dtype=c.dtype)\n    prd[0] = c[0] * 0\n    prd[1] = c[0]\n    for i in range(1, len(c)):\n        prd[i + 1] = c[i]\n        prd[i - 1] += c[i] * i\n    return prd",
    "docstring": "Multiply a Hermite series by x. Multiply the Hermite series by x, where x is the independent variable. Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the result of the multiplication. See Also -------- hermeadd, hermesub, hermemul, hermediv, hermepow Notes ----- The multiplication uses the recursion relationship for Hermite polynomials in the form .. math:: xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))) Examples -------- >>> from numpy.polynomial.hermite_e import hermemulx >>> hermemulx([1, 2, 3]) array([2., 7., 2., 3.])",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermemulx arg:c arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes Assign Call Call Assign Assign For Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_warn_mkl_vcomp",
    "source_code": "@abstractmethod\ndef _warn_mkl_vcomp(self, n_active_threads):\n    pass",
    "docstring": "Issue an estimator specific warning when vcomp and mkl are both present This method is called by .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_kmeans.py",
    "ast_data": "FunctionDef name:_warn_mkl_vcomp arg:self arg:n_active_threads arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "mps",
    "source_code": "def mps(self):\n    if self.device.type != 'mps':\n        return torch.UntypedStorage(self.size(), device='mps').copy_(self, False)\n    return self",
    "docstring": "Return a MPS copy of this storage if it's not already on the MPS.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:mps arg:self arguments arg If Compare Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_static_problem",
    "source_code": "def _is_static_problem(layout: Layout) -> tuple[bool, bool]:\n    static_shape = True\n    static_size = PythonWrapperCodegen.statically_known_list_of_ints_or_none(layout.size)\n    if static_size is None:\n        nonzero = True\n        for s in layout.size:\n            sz = PythonWrapperCodegen.statically_known_int_or_none(s)\n            if sz is not None and sz == 0:\n                nonzero = False\n                break\n        return (False, nonzero)\n    numel = 1\n    for dim in static_size:\n        numel *= dim\n    nonzero = numel > 0\n    return (static_shape, nonzero)",
    "docstring": "Check if input tensors and output layout have static shapes and non-zero sizes. Args: layout: Output layout object with a 'size' attribute. Returns: Tuple[bool, bool]: (is_static, is_nonzero) is_static: True if all shapes are statically known is_nonzero: True if all dimensions are non-zero",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py",
    "ast_data": "FunctionDef name:_is_static_problem arg:layout arguments arg Assign Assign Call If Compare Assign For Assign Call If BoolOp Compare Compare Assign Return return:yes Assign For Assign Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "tail",
    "source_code": "@final\ndef tail(self, n: int=5) -> Self:\n    if n == 0:\n        return self.iloc[0:0].copy()\n    return self.iloc[-n:].copy()",
    "docstring": "Return the last rows. This function returns last rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of , this function returns all rows except the first rows, equivalent to `nnnn` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:tail arg:self arg:n arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_rvs_3d",
    "source_code": "def _rvs_3d(self, kappa, size, random_state):\n    if size is None:\n        sample_size = 1\n    else:\n        sample_size = size\n    x = random_state.random(sample_size)\n    x = 1.0 + np.log(x + (1.0 - x) * np.exp(-2 * kappa)) / kappa\n    temp = np.sqrt(1.0 - np.square(x))\n    uniformcircle = _sample_uniform_direction(2, sample_size, random_state)\n    samples = np.stack([x, temp * uniformcircle[..., 0], temp * uniformcircle[..., 1]], axis=-1)\n    if size is None:\n        samples = np.squeeze(samples)\n    return samples",
    "docstring": "Generate samples from a von Mises-Fisher distribution with mu = [1, 0, 0] and kappa. Samples then have to be rotated towards the desired mean direction mu. This method is much faster than the general rejection sampling based algorithm. Reference:",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_rvs_3d arg:self arg:kappa arg:size arg:random_state arguments arg arg arg arg If Compare Assign Assign Assign Call Assign Call Call Assign Call Call Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "highlight_min",
    "source_code": "@Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args)\ndef highlight_min(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, props: str | None=None) -> Styler:\n    if props is None:\n        props = f'background-color: {color};'\n    return self.apply(partial(_highlight_value, op='min'), axis=axis, subset=subset, props=props)",
    "docstring": "Highlight the minimum with a style. Parameters ---------- %(subset)s %(color)s axis : {0 or 'index', 1 or 'columns', None}, default 0 Apply to each column (`Table Visualization `_ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:highlight_min arg:self arg:subset arg:color arg:axis arg:props arguments arg arg arg arg arg If Compare Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "CloughTocherInterpolatorSubclass",
    "source_code": "class CloughTocherInterpolatorSubclass(Benchmark):\n    param_names = ['n_samples']\n    params = [10, 50, 100]\n\n    def setup(self, n_samples):\n        rng = np.random.default_rng(314159)\n        x = rng.random(n_samples) - 0.5\n        y = rng.random(n_samples) - 0.5\n        self.z = np.hypot(x, y)\n        X = np.linspace(min(x), max(x))\n        Y = np.linspace(min(y), max(y))\n        self.X, self.Y = np.meshgrid(X, Y)\n        self.interp = CloughTocherInterpolatorValues(list(zip(x, y)), (self.X, self.Y))\n\n    def time_clough_tocher(self, n_samples):\n        self.interp(self.z)",
    "docstring": "Benchmark CloughTocherInterpolatorValues. Derived from the docstring example,",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:CloughTocherInterpolatorSubclass Assign Assign FunctionDef name:setup arg:self arg:n_samples arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call Assign Call Call Call FunctionDef name:time_clough_tocher arg:self arg:n_samples arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_sanitize_column_name_for_variable_scope",
    "source_code": "def _sanitize_column_name_for_variable_scope(name):\n    invalid_char = re.compile('[^A-Za-z0-9_.\\\\-]')\n    return invalid_char.sub('_', name)",
    "docstring": "Sanitizes user-provided feature names for use as variable scopes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_sanitize_column_name_for_variable_scope arg:name arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_conversion_metadata",
    "source_code": "def get_conversion_metadata(model_buffer):\n    model_object = flatbuffer_utils.convert_bytearray_to_object(model_buffer)\n    if not model_object or not model_object.metadata:\n        return None\n    for meta in model_object.metadata:\n        if meta.name.decode('utf-8') == CONVERSION_METADATA_FIELD_NAME:\n            metadata_buf = model_object.buffers[meta.buffer].data.tobytes()\n            return conversion_metadata_fb.ConversionMetadataT.InitFromObj(conversion_metadata_fb.ConversionMetadata.GetRootAsConversionMetadata(metadata_buf, 0))\n    return None",
    "docstring": "Read conversion metadata from a tflite model. Args: model_buffer: A tflite model. Returns: The conversion metadata or None if it is not populated.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_conversion_metadata arg:model_buffer arguments arg Assign Call If BoolOp Return return:no For If Compare Call Assign Call Return return:yes Call Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "is_exporting",
    "source_code": "def is_exporting() -> bool:\n    return _is_exporting_flag",
    "docstring": "Indicated whether we're under exporting. It's stricter than is_compiling() flag, as it would only be set to True when torch.export is used. Example:: >>> def forward(self, x): >>> if not torch.compiler.is_exporting(): >>> pass # ...logic that is not needed in export... >>> >>> # ...rest of the function...",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:is_exporting arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_min",
    "source_code": "def scatter_min(self, sparse_delta, use_locking=False, name=None):\n    per_var_sparse_delta = self._decompose_indexed_slices(sparse_delta)\n    for i, v in enumerate(self._variables):\n        new_name = None\n        if name is not None:\n            new_name = '{}/part_{}'.format(name, i)\n        v.scatter_min(per_var_sparse_delta[i], name=new_name)\n    return self",
    "docstring": "Implements tf.Variable.scatter_min.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:scatter_min arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Assign Call For Call Assign If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "dataclasses_to_dicts",
    "source_code": "def dataclasses_to_dicts(data):\n    from dataclasses import asdict\n    return list(map(asdict, data))",
    "docstring": "Converts a list of dataclass instances to a list of dictionaries. Parameters ---------- data : List[Type[dataclass]] Returns -------- list_dict : List[dict] Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:dataclasses_to_dicts arg:data arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, shape, dtype=dtypes.float32, name=None):\n    self._shape = tensor_shape.TensorShape(shape)\n    self._dtype = dtypes.as_dtype(dtype)\n    self._name = name",
    "docstring": "Creates a TensorSpec. Args: shape: Value convertible to . The shape of the tensor. dtype: Value convertible to . The type of the tensor values. name: Optional name for the Tensor. Raises: TypeError: If shape is not convertible to a , or dtype is not convertible to a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:shape arg:dtype arg:name arguments arg arg arg arg Assign Call Assign Call Assign"
  },
  {
    "library": "django",
    "name": "get_next_day",
    "source_code": "def get_next_day(self, date):\n    return _get_next_prev(self, date, is_previous=False, period='day')",
    "docstring": "Get the next valid day.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_next_day arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_variables",
    "source_code": "def _get_variables(graph=None):\n    assert not context.executing_eagerly()\n    variables = _GRAPH_VARIABLES[graph]\n    for opt in _GRAPH_TF_OPTIMIZERS[graph]:\n        variables.update(opt.optimizer.variables())\n    return variables",
    "docstring": "Returns variables corresponding to the given graph for initialization.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:_get_variables arg:graph arguments arg Call Assign For Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_map_attributes",
    "source_code": "def _map_attributes(self, arg, levels, defaults, attr):\n    if arg is True:\n        lookup_table = dict(zip(levels, defaults))\n    elif isinstance(arg, dict):\n        missing = set(levels) - set(arg)\n        if missing:\n            err = f'These `{attr}` levels are missing values: {missing}'\n            raise ValueError(err)\n        lookup_table = arg\n    elif isinstance(arg, Sequence):\n        arg = self._check_list_length(levels, arg, attr)\n        lookup_table = dict(zip(levels, arg))\n    elif arg:\n        err = f'This `{attr}` argument was not understood: {arg}'\n        raise ValueError(err)\n    else:\n        lookup_table = {}\n    return lookup_table",
    "docstring": "Handle the specification for a given style attribute.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:_map_attributes arg:self arg:arg arg:levels arg:defaults arg:attr arguments arg arg arg arg arg If Compare Assign Call Call If Call Assign Call Call If Assign Raise Call Assign If Call Assign Call Assign Call Call If Assign Raise Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "make_compound_path",
    "source_code": "@classmethod\ndef make_compound_path(cls, *args):\n    if not args:\n        return Path(np.empty([0, 2], dtype=np.float32))\n    vertices = np.concatenate([path.vertices for path in args])\n    codes = np.empty(len(vertices), dtype=cls.code_type)\n    i = 0\n    for path in args:\n        size = len(path.vertices)\n        if path.codes is None:\n            if size:\n                codes[i] = cls.MOVETO\n                codes[i + 1:i + size] = cls.LINETO\n        else:\n            codes[i:i + size] = path.codes\n        i += size\n    not_stop_mask = codes != cls.STOP\n    return cls(vertices[not_stop_mask], codes[not_stop_mask])",
    "docstring": "Concatenate a list of \\s into a single , removing all \\s.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:make_compound_path arg:cls arguments arg arg If Return return:yes Call Call Assign Call Assign Call Call Assign For Assign Call If Compare If Assign Assign Assign Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "has_tensor_output",
    "source_code": "def has_tensor_output(self) -> bool:\n    return isinstance(self.maybe_get_output_spec(), Layout)",
    "docstring": "True for single tensor output (excludes MultiOutput)",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:has_tensor_output arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    config = self.categorical_column.parse_example_spec\n    if self.weight_feature_key in config:\n        raise ValueError('Parse config {} already exists for {}.'.format(config[self.weight_feature_key], self.weight_feature_key))\n    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)\n    return config",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Assign If Compare Raise Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "localize",
    "source_code": "def localize(value, use_l10n=None):\n    if isinstance(value, str):\n        return value\n    elif isinstance(value, bool):\n        return str(value)\n    elif isinstance(value, (decimal.Decimal, float, int)):\n        if use_l10n is False:\n            return str(value)\n        return number_format(value, use_l10n=use_l10n)\n    elif isinstance(value, datetime.datetime):\n        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)\n    elif isinstance(value, datetime.date):\n        return date_format(value, use_l10n=use_l10n)\n    elif isinstance(value, datetime.time):\n        return time_format(value, use_l10n=use_l10n)\n    return value",
    "docstring": "Check if value is a localizable type (date, number...) and return it formatted as a string using current locale format. If use_l10n is provided and is not None, it forces the value to be localized (or not), otherwise it's always localized.",
    "type": "function",
    "file_path": "django\\django\\utils\\formats.py",
    "ast_data": "FunctionDef name:localize arg:value arg:use_l10n arguments arg arg If Call Return return:yes If Call Return return:yes Call If Call If Compare Return return:yes Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_get_indx",
    "source_code": "def _get_indx(_lambda, num, largest):\n    ii = np.argsort(_lambda)\n    if largest:\n        ii = ii[:-num - 1:-1]\n    else:\n        ii = ii[:num]\n    return ii",
    "docstring": "Get indices into depending on option.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_eigen\\lobpcg\\lobpcg.py",
    "ast_data": "FunctionDef name:_get_indx arg:_lambda arg:num arg:largest arguments arg arg arg Assign Call If Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "get_previous_day",
    "source_code": "def get_previous_day(self, date):\n    return _get_next_prev(self, date, is_previous=True, period='day')",
    "docstring": "Get the previous valid day.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_previous_day arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y=None):\n    return np.sum(self.score_samples(X))",
    "docstring": "Compute the total log-likelihood under the model. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : None Ignored. This parameter exists only for compatibility with :class:. Returns ------- logprob : float Total log-likelihood of the data in X. This is normalized to be a probability density, so the value will be low for high-dimensional data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_kde.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_device_backend_map",
    "source_code": "def get_device_backend_map(self) -> dict[str, Backend]:\n    return self.device_backend_map",
    "docstring": "Return backend map of the device.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_device_backend_map arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "instantiate_ray_dataloader",
    "source_code": "def instantiate_ray_dataloader(dataset: RayDataset, batch_size: int=1, shuffle: bool=True) -> DataLoader[RayGroup]:\n\n    def collate_rays(items: List[RayGroup]) -> RayGroup:\n        return items[0]\n    if TYPE_CHECKING:\n        return DataLoader(dataset)\n    else:\n        return DataLoader(dataset, sampler=BatchSampler(RandomSampler(dataset) if shuffle else SequentialSampler(dataset), batch_size, drop_last=False), collate_fn=collate_rays)",
    "docstring": "Initialize a dataloader to manage a ray dataset. Args: dataset: A ray dataset: RayDataset batch_size: Number of rays to sample in a batch: int shuffle: Whether to shuffle rays or sample then sequentially: bool",
    "type": "function",
    "file_path": "kornia\\kornia\\nerf\\data_utils.py",
    "ast_data": "FunctionDef name:instantiate_ray_dataloader arg:dataset arg:batch_size arg:shuffle arguments arg arg arg FunctionDef name:collate_rays arg:items arguments arg Return return:yes If Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "match_score",
    "source_code": "@property\ndef match_score(self) -> int | None:\n    return self._matching_score",
    "docstring": "The matching score of the OnnxSchemaChecker . If this remains None, it means the matching score has not been calculated, and it's not a nearest match candidate. Returns: The matching score of the OnnxSchemaChecker .",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:match_score arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "named_transformers_",
    "source_code": "@property\ndef named_transformers_(self):\n    return Bunch(**{name: trans for name, trans, _ in self.transformers_})",
    "docstring": "Access the fitted transformer by name. Read-only attribute to access any transformer by given name. Keys are transformer names and values are the fitted transformer objects.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:named_transformers_ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "unpack_1tuple",
    "source_code": "def unpack_1tuple(tup):\n    if len(tup) == 1 and isinstance(tup[0], slice):\n        if isinstance(tup, list):\n            raise ValueError('Indexing with a single-item list containing a slice is not allowed. Pass a tuple instead.')\n        return tup[0]\n    return tup",
    "docstring": "If we have a length-1 tuple/list that contains a slice, unpack to just the slice. Notes ----- The list case is deprecated.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:unpack_1tuple arg:tup arguments arg If BoolOp Compare Call Call If Call Raise Call Return return:yes Return return:yes"
  },
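The unwrapping rule above in action; a small sketch assuming `unpack_1tuple` can be imported from `pandas.core.indexers.utils` (a private module, so this is for illustration only).

```python
from pandas.core.indexers.utils import unpack_1tuple  # private API, illustration only

# A length-1 tuple holding a slice is unwrapped to the bare slice.
assert unpack_1tuple((slice(1, 3),)) == slice(1, 3)

# Anything else passes through unchanged.
assert unpack_1tuple((0, 1)) == (0, 1)

# The equivalent single-item *list* form is rejected (deprecated indexing form).
try:
    unpack_1tuple([slice(1, 3)])
except ValueError as e:
    print(e)
```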
  {
    "library": "kornia",
    "name": "apply_transform_class",
    "source_code": "def apply_transform_class(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    return input",
    "docstring": "Process class tags corresponding to the inputs that are transformed.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:apply_transform_class arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_class",
    "source_code": "def find_class(self, module: str, name: str) -> Any:\n    symbol_map = {'WorkerTimerArgs': WorkerTimerArgs, 'WorkerOutput': WorkerOutput, 'WorkerFailure': WorkerFailure}\n    if name in symbol_map:\n        return symbol_map[name]\n    return super().find_class(module, name)",
    "docstring": "Resolve import for pickle. When the main runner uses a symbol from this file, it sees it as . However the worker (called as a standalone file) sees the same symbol as . We have to help pickle understand that they refer to the same symbols.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\worker\\main.py",
    "ast_data": "FunctionDef name:find_class arg:self arg:module arg:name arguments arg arg arg Assign If Compare Return return:yes Return return:yes Call Call"
  },
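The same remapping pattern in a self-contained form: a custom `pickle.Unpickler` whose `find_class` redirects selected symbol names to local definitions. The `Point` class and the map below are hypothetical stand-ins.

```python
import io
import pickle

class Point:  # stand-in for a class whose import path differs across processes
    def __init__(self, x):
        self.x = x

class RemappingUnpickler(pickle.Unpickler):
    SYMBOL_MAP = {"Point": Point}  # name -> local symbol, mirroring find_class above

    def find_class(self, module, name):
        if name in self.SYMBOL_MAP:
            return self.SYMBOL_MAP[name]
        return super().find_class(module, name)

payload = pickle.dumps(Point(3))
obj = RemappingUnpickler(io.BytesIO(payload)).load()
print(obj.x)  # 3
```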
  {
    "library": "tensorflow",
    "name": "GraphDefSplitter",
    "source_code": "class GraphDefSplitter(split.ComposableSplitter):\n\n    def build_chunks(self):\n        proto = self._proto\n        if not isinstance(proto, graph_pb2.GraphDef):\n            raise TypeError('Can only split GraphDef type protos.')\n        proto_size = proto.ByteSize()\n        if proto_size < constants.max_size():\n            return\n        node_splitter = RepeatedMessageSplitter(proto, 'node', [ConstantNodeDefSplitter, LargeMessageSplitter], parent_splitter=self, fields_in_parent=[])\n        function_splitter = RepeatedMessageSplitter(proto.library, ['function'], [FunctionDefSplitter], parent_splitter=self, fields_in_parent=['library'])\n        library_size = proto.library.ByteSize()\n        approx_node_size = proto_size - library_size\n        if library_size > approx_node_size:\n            library_size -= function_splitter.build_chunks()\n            if library_size + approx_node_size > constants.max_size():\n                approx_node_size -= node_splitter.build_chunks()\n        else:\n            approx_node_size -= node_splitter.build_chunks()\n            if library_size + approx_node_size > constants.max_size():\n                library_size -= function_splitter.build_chunks()\n        if proto.ByteSize() > constants.max_size():\n            self.add_chunk(proto.library, ['library'], 1)\n            proto.ClearField('library')",
    "docstring": "Implements proto splitter for GraphDef. This Splitter will modify the passed in proto in place.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split_graph_def.py",
    "ast_data": "ClassDef name:GraphDefSplitter FunctionDef name:build_chunks arg:self arguments arg Assign If Call Raise Call Assign Call If Compare Call Return return:no Assign Call Assign Call Assign Call Assign If Compare Call If Compare Call Call Call If Compare Call Call If Compare Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "skewtest",
    "source_code": "def skewtest(a, axis=0, alternative='two-sided'):\n    a, axis = _chk_asarray(a, axis)\n    if axis is None:\n        a = a.ravel()\n        axis = 0\n    b2 = skew(a, axis)\n    n = a.count(axis)\n    if np.min(n) < 8:\n        raise ValueError(f'skewtest is not valid with less than 8 samples; {np.min(n)} samples were given.')\n    y = b2 * ma.sqrt((n + 1) * (n + 3) / (6.0 * (n - 2)))\n    beta2 = 3.0 * (n * n + 27 * n - 70) * (n + 1) * (n + 3) / ((n - 2.0) * (n + 5) * (n + 7) * (n + 9))\n    W2 = -1 + ma.sqrt(2 * (beta2 - 1))\n    delta = 1 / ma.sqrt(0.5 * ma.log(W2))\n    alpha = ma.sqrt(2.0 / (W2 - 1))\n    y = ma.where(y == 0, 1, y)\n    Z = delta * ma.log(y / alpha + ma.sqrt((y / alpha) ** 2 + 1))\n    pvalue = scipy.stats._stats_py._get_pvalue(Z, distributions.norm, alternative)\n    return SkewtestResult(Z[()], pvalue[()])",
    "docstring": "Tests whether the skew is different from the normal distribution. Parameters ---------- a : array_like The data to be tested axis : int or None, optional Axis along which statistics are calculated. Default is 0. If None, compute over the whole array . alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. Default is 'two-sided'. The following options are available: * 'two-sided': the skewness of the distribution underlying the sample is different from that of the normal distribution (i.e. 0) * 'less': the skewness of the distribution underlying the sample is less than that of the normal distribution * 'greater': the skewness of the distribution underlying the sample is greater than that of the normal distribution .. versionadded:: 1.7.0 Returns ------- statistic : array_like The computed z-score for this test. pvalue : array_like A p-value for the hypothesis test Notes ----- For more details about , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:skewtest arg:a arg:axis arg:alternative arguments arg arg arg Assign Call If Compare Assign Call Assign Assign Call Assign Call If Compare Call Raise Call Call Assign Call Assign Assign Call Assign Call Call Assign Call Assign Call Compare Assign Call Call Assign Call Return return:yes Call"
  },
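A short usage sketch for the masked-array `skewtest` above via its public home, `scipy.stats.mstats`; the sample data is arbitrary.

```python
import numpy as np
from scipy.stats import mstats

rng = np.random.default_rng(0)
a = rng.normal(size=100)  # the test requires at least 8 samples

res = mstats.skewtest(a)
print(res.statistic, res.pvalue)

# One-sided alternative (available since SciPy 1.7.0).
print(mstats.skewtest(a, alternative="greater"))
```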
  {
    "library": "pandas",
    "name": "to_dense",
    "source_code": "def to_dense(self) -> DataFrame:\n    data = {k: v.array.to_dense() for k, v in self._parent.items()}\n    return self._parent._constructor(data, index=self._parent.index, columns=self._parent.columns)",
    "docstring": "Convert a DataFrame with sparse values to dense. Returns ------- DataFrame A DataFrame with the same values stored as dense arrays. See Also -------- DataFrame.sparse.density : Ratio of non-sparse points to total (dense) data points. Examples -------- >>> df = pd.DataFrame({\"A\": pd.arrays.SparseArray([0, 1, 0])}) >>> df.sparse.to_dense() A 0 0 1 1 2 0",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\accessor.py",
    "ast_data": "FunctionDef name:to_dense arg:self arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "variable",
    "source_code": "@doc_controls.do_not_generate_docs\ndef variable(value, dtype=None, name=None, constraint=None):\n    if dtype is None:\n        dtype = floatx()\n    if hasattr(value, 'tocoo'):\n        sparse_coo = value.tocoo()\n        indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1)), 1)\n        v = sparse_tensor.SparseTensor(indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)\n        v._keras_shape = sparse_coo.shape\n        return v\n    v = variables_module.Variable(value, dtype=dtypes_module.as_dtype(dtype), name=name, constraint=constraint)\n    if isinstance(value, np.ndarray):\n        v._keras_shape = value.shape\n    elif hasattr(value, 'shape'):\n        v._keras_shape = int_shape(value)\n    track_variable(v)\n    return v",
    "docstring": "Instantiates a variable and returns it. Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:variable arg:value arg:dtype arg:name arg:constraint arguments arg arg arg arg If Compare Assign Call If Call Assign Call Assign Call Call Call Assign Call Assign Return return:yes Assign Call Call If Call Assign If Call Assign Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_fromroots",
    "source_code": "def _fromroots(line_f, mul_f, roots):\n    if len(roots) == 0:\n        return np.ones(1)\n    else:\n        [roots] = as_series([roots], trim=False)\n        roots.sort()\n        p = [line_f(-r, 1) for r in roots]\n        n = len(p)\n        while n > 1:\n            m, r = divmod(n, 2)\n            tmp = [mul_f(p[i], p[i + m]) for i in range(m)]\n            if r:\n                tmp[0] = mul_f(tmp[0], p[-1])\n            p = tmp\n            n = m\n        return p[0]",
    "docstring": "Helper function used to implement the `` functions for more detail",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_fromroots arg:line_f arg:mul_f arg:roots arguments arg arg arg If Compare Call Return return:yes Call Assign Call Call Assign Call Assign Call While Compare Assign Call Assign Call Call If Assign Call Assign Assign Return return:yes"
  },
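The pairwise product scheme in `_fromroots` backs the public `*fromroots` constructors; a quick check through `numpy.polynomial.polynomial.polyfromroots`.

```python
import numpy as np
from numpy.polynomial import polynomial as P

# (x - 1)(x - 2)(x - 3) = -6 + 11x - 6x^2 + x^3
coeffs = P.polyfromroots([1, 2, 3])
print(coeffs)  # [-6. 11. -6.  1.]

# The helper builds linear factors (x - r) and repeatedly multiplies
# adjacent pairs, halving the list each pass; balanced products keep
# intermediate degrees (and rounding error) small.
assert np.allclose(P.polyval([1, 2, 3], coeffs), 0)
```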
  {
    "library": "django",
    "name": "colorize",
    "source_code": "def colorize(text='', opts=(), **kwargs):\n    code_list = []\n    if text == '' and len(opts) == 1 and (opts[0] == 'reset'):\n        return '\\x1b[%sm' % RESET\n    for k, v in kwargs.items():\n        if k == 'fg':\n            code_list.append(foreground[v])\n        elif k == 'bg':\n            code_list.append(background[v])\n    for o in opts:\n        if o in opt_dict:\n            code_list.append(opt_dict[o])\n    if 'noreset' not in opts:\n        text = '%s\\x1b[%sm' % (text or '', RESET)\n    return '%s%s' % ('\\x1b[%sm' % ';'.join(code_list), text or '')",
    "docstring": "Return your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Return the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print(colorize('first line', fg='red', opts=('noreset',))) print('this should be red too') print(colorize('and so should this')) print('this should not be red')",
    "type": "function",
    "file_path": "django\\django\\utils\\termcolors.py",
    "ast_data": "FunctionDef name:colorize arg:text arg:opts arguments arg arg arg Assign If BoolOp Compare Compare Call Compare Return return:yes For Call If Compare Call If Compare Call For If Compare Call If Compare Assign BoolOp Return return:yes Call BoolOp"
  },
  {
    "library": "scikit-learn",
    "name": "_estimator_has",
    "source_code": "def _estimator_has(attr, *, delegates=('estimator_', 'estimator')):\n\n    def check(self):\n        for delegate in delegates:\n            if hasattr(self, delegate):\n                delegator = getattr(self, delegate)\n                if isinstance(delegator, Sequence):\n                    return getattr(delegator[0], attr)\n                else:\n                    return getattr(delegator, attr)\n        raise ValueError(f'None of the delegates {delegates} are present in the class.')\n    return check",
    "docstring": "Check if we can delegate a method to the underlying estimator. We check the in the order they are passed. By default, we first check the fitted estimator if available, otherwise we check the unfitted estimator. Parameters ---------- attr : str Name of the attribute the delegate might or might not have. delegates: tuple of str, default=(\"estimator_\", \"estimator\") A tuple of sub-estimator(s) to check if we can delegate the method. Returns ------- check : function Function to check if the delegate has the attribute. Raises ------ ValueError Raised when none of the delegates are present in the object.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_estimator_has arg:attr arguments arg arg FunctionDef name:check arg:self arguments arg For If Call Assign Call If Call Return return:yes Call Return return:yes Call Raise Call Return return:yes"
  },
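How such a check function is typically consumed: as the condition for `sklearn.utils.metaestimators.available_if`, so the delegated method only exists when the wrapped estimator provides it. `Wrapper` and `_delegate_has` below are simplified, hypothetical stand-ins for the pattern.

```python
from sklearn.base import BaseEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.utils.metaestimators import available_if

def _delegate_has(attr):
    # Simplified single-delegate version of _estimator_has.
    def check(self):
        return hasattr(self.estimator, attr)
    return check

class Wrapper(BaseEstimator):
    def __init__(self, estimator):
        self.estimator = estimator

    @available_if(_delegate_has("predict_proba"))
    def predict_proba(self, X):
        return self.estimator.predict_proba(X)

w = Wrapper(LogisticRegression())
print(hasattr(w, "predict_proba"))  # True, since LogisticRegression has it
```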
  {
    "library": "tensorflow",
    "name": "exit",
    "source_code": "def exit(tensor, name=None):\n    tensor = ops.internal_convert_to_tensor_or_composite(tensor, as_ref=True)\n    if isinstance(tensor, tensor_lib.Tensor):\n        if tensor.dtype._is_ref_dtype:\n            return gen_control_flow_ops.ref_exit(tensor, name)\n        else:\n            return gen_control_flow_ops._exit(tensor, name)\n    elif isinstance(tensor, composite_tensor.CompositeTensor):\n        return nest.map_structure(exit, tensor, expand_composites=True)\n    else:\n        raise TypeError(f\"'tensor' must be a Tensor or CompositeTensor. Received: {type(tensor)}.\")",
    "docstring": "Exits the current frame to its parent frame. Exit makes its input available to the parent frame. Args: tensor: The tensor to be made available to the parent frame. name: A name for this operation (optional). Returns: The same tensor as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:exit arg:tensor arg:name arguments arg arg Assign Call If Call If Return return:yes Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "control_dependency_on_returns",
    "source_code": "def control_dependency_on_returns(return_value):\n\n    def control_dependency_handle(t):\n        if isinstance(t, tensor_array_ops.TensorArray):\n            return t.flow\n        return t\n    if return_value is None:\n        return contextlib.contextmanager(lambda: (yield))()\n    if not isinstance(return_value, (list, tuple)):\n        return_value = (return_value,)\n    return_value = tuple((control_dependency_handle(t) for t in return_value))\n    return ops.control_dependencies(return_value)",
    "docstring": "Create a TF control dependency on the return values of a function. If the function had no return value, a no-op context is returned. Args: return_value: The return value to set as control dependency. Returns: A context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\context_managers.py",
    "ast_data": "FunctionDef name:control_dependency_on_returns arg:return_value arguments arg FunctionDef name:control_dependency_handle arg:t arguments arg If Call Return return:yes Return return:yes If Compare Return return:yes Call Call arguments If Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_avg_pool_flops",
    "source_code": "@ops.RegisterStatistics('AvgPool', 'flops')\ndef _avg_pool_flops(graph, node):\n    return _pool_flops(graph, node)",
    "docstring": "Compute flops for AvgPool operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_avg_pool_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_apply_fn",
    "source_code": "def _apply_fn(dataset):\n    return _GroupByReducerDataset(dataset, key_func, reducer)",
    "docstring": "Function from to that applies the transformation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "numpy_to_tensor",
    "source_code": "def numpy_to_tensor(value):\n    assert np is not None\n    if isinstance(value, np.ndarray):\n        return torch.as_tensor(value)\n    if isinstance(value, tnp.ndarray):\n        return value.tensor\n    elif isinstance(value, (tuple, list)):\n        return type(value)((numpy_to_tensor(obj) for obj in value))\n    else:\n        return value",
    "docstring": "Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:numpy_to_tensor arg:value arguments arg Compare If Call Return return:yes Call If Call Return return:yes If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "InternalError",
    "source_code": "@tf_export('errors.InternalError')\nclass InternalError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(InternalError, self).__init__(node_def, op, message, INTERNAL, *args)",
    "docstring": "Raised when the system experiences an internal error. This exception is raised when some invariant expected by the runtime has been broken. Catching this exception is not recommended.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:InternalError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "django",
    "name": "MessageDecoder",
    "source_code": "class MessageDecoder(json.JSONDecoder):\n\n    def process_messages(self, obj):\n        if isinstance(obj, list) and obj:\n            if obj[0] == MessageEncoder.message_key:\n                if obj[1]:\n                    obj[3] = mark_safe(obj[3])\n                return Message(*obj[2:])\n            return [self.process_messages(item) for item in obj]\n        if isinstance(obj, dict):\n            return {key: self.process_messages(value) for key, value in obj.items()}\n        return obj\n\n    def decode(self, s, **kwargs):\n        decoded = super().decode(s, **kwargs)\n        return self.process_messages(decoded)",
    "docstring": "Decode JSON that includes serialized `` instances.",
    "type": "class",
    "file_path": "django\\django\\contrib\\messages\\storage\\cookie.py",
    "ast_data": "ClassDef name:MessageDecoder FunctionDef name:process_messages arg:self arg:obj arguments arg arg If BoolOp Call If Compare If Assign Call Return return:yes Call Return return:yes Call If Call Return return:yes Call Call Return return:yes FunctionDef name:decode arg:self arg:s arguments arg arg arg Assign Call Call Return return:yes Call"
  },
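A round-trip sketch for the cookie codec, pairing `MessageDecoder` with `MessageEncoder` from the same module; the bare `settings.configure()` call is only there so the example runs as a standalone script.

```python
from django.conf import settings

if not settings.configured:
    settings.configure()  # minimal setup for a standalone script

from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.cookie import MessageDecoder, MessageEncoder

payload = MessageEncoder().encode([Message(20, "hello"), {"nested": Message(25, "done")}])
restored = MessageDecoder().decode(payload)
print(restored[0].message)          # hello
print(restored[1]["nested"].level)  # 25
```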
  {
    "library": "django",
    "name": "LimitedStream",
    "source_code": "class LimitedStream(IOBase):\n\n    def __init__(self, stream, limit):\n        self._read = stream.read\n        self._readline = stream.readline\n        self._pos = 0\n        self.limit = limit\n\n    def read(self, size=-1, /):\n        _pos = self._pos\n        limit = self.limit\n        if _pos >= limit:\n            return b''\n        if size == -1 or size is None:\n            size = limit - _pos\n        else:\n            size = min(size, limit - _pos)\n        data = self._read(size)\n        self._pos += len(data)\n        return data\n\n    def readline(self, size=-1, /):\n        _pos = self._pos\n        limit = self.limit\n        if _pos >= limit:\n            return b''\n        if size == -1 or size is None:\n            size = limit - _pos\n        else:\n            size = min(size, limit - _pos)\n        line = self._readline(size)\n        self._pos += len(line)\n        return line",
    "docstring": "Wrap another stream to disallow reading it past a number of bytes. Based on the implementation from werkzeug.wsgi.LimitedStream See",
    "type": "class",
    "file_path": "django\\django\\core\\handlers\\wsgi.py",
    "ast_data": "ClassDef name:LimitedStream FunctionDef name:__init__ arg:self arg:stream arg:limit arguments arg arg arg Assign Assign Assign Assign FunctionDef name:read arguments arg arg Assign Assign If Compare Return return:yes If BoolOp Compare Compare Assign Assign Call Assign Call Call Return return:yes FunctionDef name:readline arguments arg arg Assign Assign If Compare Return return:yes If BoolOp Compare Compare Assign Assign Call Assign Call Call Return return:yes"
  },
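The capping behavior of `LimitedStream` on an in-memory stream; `io.BytesIO` stands in for the WSGI input object.

```python
import io
from django.core.handlers.wsgi import LimitedStream

stream = LimitedStream(io.BytesIO(b"abcdefghij"), limit=5)
print(stream.read(3))  # b'abc'
print(stream.read())   # b'de'  -- only 5 bytes total are ever readable
print(stream.read())   # b''   -- limit reached; subsequent reads are empty
```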
  {
    "library": "tensorflow",
    "name": "function_call_options",
    "source_code": "@property\ndef function_call_options(self):\n    if self._thread_local_data.function_call_options is None:\n        config = self.config\n        if self._soft_device_placement is None:\n            config.allow_soft_placement = True\n        self._thread_local_data.function_call_options = FunctionCallOptions(config_proto=config)\n    return self._thread_local_data.function_call_options",
    "docstring": "Returns function call options for current thread. Note that the returned object is still referenced by the eager context. Returns: the FunctionCallOptions for current thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:function_call_options arg:self arguments arg If Compare Assign If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "update_from_data_y",
    "source_code": "def update_from_data_y(self, y, ignore=None):\n    y = np.ravel(y)\n    self.update_from_data_xy(np.array([y, y]).T, ignore=ignore, updatex=False)",
    "docstring": "Update the y-bounds of the based on the passed in data. After updating, the bounds will have positive *height*, and *y0* will be the minimal value. Parameters ---------- y : Array of y-values. ignore : bool, optional - When `BboxBboxignore`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:update_from_data_y arg:self arg:y arg:ignore arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    raise NotImplementedError('Must be implemented in descendants')",
    "docstring": "Creates an iterator for the . The returned iterator implements the Python Iterator protocol. Example usage: >>> global_batch_size = 4 >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size) >>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset)) >>> print(next(distributed_iterator)) PerReplica:{ 0: tf.Tensor([1 2], shape=(2,), dtype=int32), 1: tf.Tensor([3 4], shape=(2,), dtype=int32) } Returns: An instance for the given object to enumerate over the distributed data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "as_dot",
    "source_code": "def as_dot(self):\n    result = 'digraph CFG {\\n'\n    for node in self.index.values():\n        result += '  %s [label=\"%s\"];\\n' % (id(node), node)\n    for node in self.index.values():\n        for next_ in node.next:\n            result += '  %s -> %s;\\n' % (id(node), id(next_))\n    result += '}'\n    return result",
    "docstring": "Print CFG in DOT format.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:as_dot arg:self arguments arg Assign For Call Call For Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "isclass",
    "source_code": "def isclass(object):\n    return _inspect.isclass(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isclass.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:isclass arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "tpu_device_ordinal_at_coordinates",
    "source_code": "def tpu_device_ordinal_at_coordinates(self, device_coordinates):\n    return self._topology_devices[tuple(device_coordinates)]",
    "docstring": "Returns the TensorFlow device number at . Args: device_coordinates: An integer sequence describing a device's physical coordinates in the TPU fabric. Returns: Returns the TensorFlow device number within the task corresponding to attached to the device with those physical coordinates.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:tpu_device_ordinal_at_coordinates arg:self arg:device_coordinates arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_MinimumGrad",
    "source_code": "@ops.RegisterGradient('Minimum')\ndef _MinimumGrad(op: ops.Operation, grad):\n    return _MaximumMinimumGrad(op, grad, math_ops.less_equal)",
    "docstring": "Returns grad*(x y) with type of grad.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_MinimumGrad arg:op arg:grad arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "get_client_id",
    "source_code": "def get_client_id(self):\n    raise NotImplementedError()",
    "docstring": "A method to get the client_id associated with this credential. For instance, the table in the database has a column ``:: def get_client_id(self): return self.client_id",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_client_id arg:self arguments arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "get_self",
    "source_code": "def get_self(self) -> ast.arg | None:\n    if self.current_function and self.current_function.args.args:\n        return self.current_function.args.args[0]\n    if self.current_function and self.current_function.args.posonlyargs:\n        return self.current_function.args.posonlyargs[0]\n    return None",
    "docstring": "Returns the name of the first argument if in a function.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:get_self arg:self arguments arg If BoolOp Return return:yes If BoolOp Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "transform_path_non_affine",
    "source_code": "def transform_path_non_affine(self, path):\n    x = self.transform_non_affine(path.vertices)\n    return Path._fast_from_codes_and_verts(x, path.codes, path)",
    "docstring": "Apply the non-affine part of this transform to *path*, returning a new . ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_path_non_affine arg:self arg:path arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "inv",
    "source_code": "def inv(self, x):\n    raise NotImplementedError",
    "docstring": "Map points `` is the function after the transformation is applied, then:: f(x) = g(self.inv(x))",
    "type": "method",
    "file_path": "scipy\\scipy\\integrate\\_cubature.py",
    "ast_data": "FunctionDef name:inv arg:self arg:x arguments arg arg Raise"
  },
  {
    "library": "scrapy",
    "name": "remove_from_list",
    "source_code": "def remove_from_list(self, name: _SettingsKeyT, item: Any) -> None:\n    value: list[str] = self.getlist(name)\n    if item not in value:\n        raise ValueError(f'{item!r} not found in the {name} setting ({value!r}).')\n    self.set(name, [v for v in value if v != item], self.getpriority(name) or 0)",
    "docstring": "Remove *item* from the :class: setting with the specified *name*. If *item* is missing, raise :exc:. This change is applied regardless of the priority of the *name* setting. The setting priority is not affected by this change either.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:remove_from_list arg:self arg:name arg:item arguments arg arg arg Call If Compare Raise Call Call Compare BoolOp Call"
  },
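A usage sketch for `remove_from_list` on a Scrapy `Settings` object; the setting name is made up for illustration.

```python
from scrapy.settings import Settings

s = Settings()
s.set("MY_COMPONENT_LIST", ["a", "b", "c"])

s.remove_from_list("MY_COMPONENT_LIST", "b")  # the setting's priority is preserved
print(s.getlist("MY_COMPONENT_LIST"))  # ['a', 'c']

# Removing a missing item raises ValueError.
try:
    s.remove_from_list("MY_COMPONENT_LIST", "z")
except ValueError as e:
    print(e)
```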
  {
    "library": "numpy",
    "name": "_get_indexes",
    "source_code": "def _get_indexes(arr, virtual_indexes, valid_values_count):\n    previous_indexes = np.asanyarray(np.floor(virtual_indexes))\n    next_indexes = np.asanyarray(previous_indexes + 1)\n    indexes_above_bounds = virtual_indexes >= valid_values_count - 1\n    if indexes_above_bounds.any():\n        previous_indexes[indexes_above_bounds] = -1\n        next_indexes[indexes_above_bounds] = -1\n    indexes_below_bounds = virtual_indexes < 0\n    if indexes_below_bounds.any():\n        previous_indexes[indexes_below_bounds] = 0\n        next_indexes[indexes_below_bounds] = 0\n    if np.issubdtype(arr.dtype, np.inexact):\n        virtual_indexes_nans = np.isnan(virtual_indexes)\n        if virtual_indexes_nans.any():\n            previous_indexes[virtual_indexes_nans] = -1\n            next_indexes[virtual_indexes_nans] = -1\n    previous_indexes = previous_indexes.astype(np.intp)\n    next_indexes = next_indexes.astype(np.intp)\n    return (previous_indexes, next_indexes)",
    "docstring": "Get the valid indexes of arr neighbouring virtual_indexes. Note This is a companion function to linear interpolation of Quantiles Returns ------- (previous_indexes, next_indexes): Tuple A Tuple of virtual_indexes neighbouring indexes",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_get_indexes arg:arr arg:virtual_indexes arg:valid_values_count arguments arg arg arg Assign Call Call Assign Call Assign Compare If Call Assign Assign Assign Compare If Call Assign Assign If Call Assign Call If Call Assign Assign Assign Call Assign Call Return return:yes"
  },
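A concrete trace of `_get_indexes` on a tiny input, showing the floor/next neighbor pairs and the -1 sentinel for out-of-bounds virtual indexes; the values follow directly from the code above, and the private import path is an assumption for illustration.

```python
import numpy as np
from numpy.lib._function_base_impl import _get_indexes  # private API, illustration only

arr = np.array([10.0, 20.0, 30.0, 40.0])
virtual = np.array([0.5, 2.0, 3.5])

prev_idx, next_idx = _get_indexes(arr, virtual, len(arr))
print(prev_idx)  # [ 0  2 -1]  -- 3.5 >= n-1, so both neighbors become -1
print(next_idx)  # [ 1  3 -1]
```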
  {
    "library": "scikit-learn",
    "name": "bench_isotonic_regression",
    "source_code": "def bench_isotonic_regression(Y):\n    gc.collect()\n    tstart = default_timer()\n    isotonic_regression(Y)\n    return default_timer() - tstart",
    "docstring": "Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds).",
    "type": "function",
    "file_path": "scikit-learn\\benchmarks\\bench_isotonic.py",
    "ast_data": "FunctionDef name:bench_isotonic_regression arg:Y arguments arg Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_DefaultReplicaContext",
    "source_code": "class _DefaultReplicaContext(ReplicaContext):\n\n    @property\n    def replica_id_in_sync_group(self):\n        return 0",
    "docstring": "ReplicaContext for _DefaultDistributionStrategy.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:_DefaultReplicaContext FunctionDef name:replica_id_in_sync_group arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ndarrays_to_tensors",
    "source_code": "def ndarrays_to_tensors(*inputs):\n    from ._ndarray import ndarray\n    if len(inputs) == 0:\n        return ValueError()\n    elif len(inputs) == 1:\n        input_ = inputs[0]\n        if isinstance(input_, ndarray):\n            return input_.tensor\n        elif isinstance(input_, tuple):\n            result = []\n            for sub_input in input_:\n                sub_result = ndarrays_to_tensors(sub_input)\n                result.append(sub_result)\n            return tuple(result)\n        else:\n            return input_\n    else:\n        assert isinstance(inputs, tuple)\n        return ndarrays_to_tensors(inputs)",
    "docstring": "Convert all ndarrays from to tensors. (other things are intact)",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_util.py",
    "ast_data": "FunctionDef name:ndarrays_to_tensors arguments arg If Compare Call Return return:yes Call If Compare Call Assign If Call Return return:yes If Call Assign For Assign Call Call Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "construct_instance",
    "source_code": "def construct_instance(form, instance, fields=None, exclude=None):\n    from django.db import models\n    opts = instance._meta\n    cleaned_data = form.cleaned_data\n    file_field_list = []\n    for f in opts.fields:\n        if not f.editable or isinstance(f, models.AutoField) or f.name not in cleaned_data:\n            continue\n        if fields is not None and f.name not in fields:\n            continue\n        if exclude and f.name in exclude:\n            continue\n        if f.has_default() and form[f.name].field.widget.value_omitted_from_data(form.data, form.files, form.add_prefix(f.name)) and (cleaned_data.get(f.name) in form[f.name].field.empty_values):\n            continue\n        if isinstance(f, models.FileField):\n            file_field_list.append(f)\n        else:\n            f.save_form_data(instance, cleaned_data[f.name])\n    for f in file_field_list:\n        f.save_form_data(instance, cleaned_data[f.name])\n    return instance",
    "docstring": "Construct and return a model instance from the bound ``, but do not save the returned instance to the database.",
    "type": "function",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:construct_instance arg:form arg:instance arg:fields arg:exclude arguments arg arg arg arg Assign Assign Assign For If BoolOp Call Compare If BoolOp Compare Compare If BoolOp Compare If BoolOp Call Call Call Compare Call If Call Call Call For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_lr",
    "source_code": "@override\ndef get_lr(self) -> list[float]:\n    _warn_get_lr_called_within_step(self)\n    if self._is_initial:\n        return [group['lr'] for group in self.optimizer.param_groups]\n    return [group['lr'] * self.gamma for group in self.optimizer.param_groups]",
    "docstring": "Compute the learning rate of each parameter group.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:get_lr arg:self arguments arg Call If Return return:yes Return return:yes"
  },
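This `get_lr` implements the multiplicative decay of `torch.optim.lr_scheduler.ExponentialLR` (the file it lives in); a usage sketch through the public scheduler API.

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)

for _ in range(3):
    opt.step()    # optimizer step first, then the scheduler
    sched.step()
    print(sched.get_last_lr())  # [0.09], [0.081], [0.0729] (approximately)
```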
  {
    "library": "django",
    "name": "ewkt",
    "source_code": "@property\ndef ewkt(self):\n    srid = self.srid\n    return 'SRID=%s;%s' % (srid, self.wkt) if srid else self.wkt",
    "docstring": "Return the EWKT (SRID + WKT) of the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:ewkt arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_load_layer",
    "source_code": "def _load_layer(self, node_id, identifier, metadata):\n    metadata = json_utils.decode(metadata)\n    if node_id in self.loaded_nodes:\n        node, setter = self.loaded_nodes[node_id]\n        _maybe_add_serialized_attributes(node, metadata)\n        config = metadata.get('config')\n        if _is_graph_network(node) and generic_utils.validate_config(config):\n            child_nodes = self._get_child_layer_node_ids(node_id)\n            self.model_layer_dependencies[node_id] = (node, child_nodes)\n            if not child_nodes:\n                self._models_to_reconstruct.append(node_id)\n        return (node, setter)\n    obj, setter = self._revive_from_config(identifier, metadata, node_id)\n    if obj is None:\n        obj, setter = revive_custom_object(identifier, metadata)\n    _maybe_add_serialized_attributes(obj, metadata)\n    return (obj, setter)",
    "docstring": "Load a single layer from a SavedUserObject proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_load_layer arg:self arg:node_id arg:identifier arg:metadata arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call If BoolOp Call Call Assign Call Assign If Call Return return:yes Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ZipDataset",
    "source_code": "class _ZipDataset(dataset_ops.DatasetV2):\n\n    def __init__(self, datasets, name=None):\n        for ds in nest.flatten(datasets):\n            if not isinstance(ds, data_types.DatasetV2):\n                if isinstance(ds, list):\n                    raise TypeError('Invalid input to `zip`. Inputs are expected to be (nested) structures of `tf.data.Dataset` objects. Python `list` is not supported and you should use `tuple` instead.')\n                else:\n                    raise TypeError(f'Invalid input to `zip`. Inputs are expected to be (nested) structures of `tf.data.Dataset` objects but encountered object of type {type(ds)}.')\n        self._datasets = datasets\n        self._structure = nest.pack_sequence_as(self._datasets, [ds.element_spec for ds in nest.flatten(self._datasets)])\n        self._name = name\n        variant_tensor = gen_dataset_ops.zip_dataset([ds._variant_tensor for ds in nest.flatten(self._datasets)], **self._common_args)\n        super().__init__(variant_tensor)\n\n    def _inputs(self):\n        return nest.flatten(self._datasets)\n\n    @property\n    def element_spec(self):\n        return self._structure",
    "docstring": "A that zips its inputs together.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\zip_op.py",
    "ast_data": "ClassDef name:_ZipDataset FunctionDef name:__init__ arg:self arg:datasets arg:name arguments arg arg arg For Call If Call If Call Raise Call Raise Call Call Assign Assign Call Call Assign Assign Call Call Call Call FunctionDef name:_inputs arg:self arguments arg Return return:yes Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
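The public entry point for `_ZipDataset` is `tf.data.Dataset.zip`; note the tuple requirement enforced in the constructor above.

```python
import tensorflow as tf

a = tf.data.Dataset.range(3)
b = tf.data.Dataset.from_tensor_slices(["x", "y", "z"])

# Inputs must be a (nested) tuple of datasets; a plain list raises TypeError.
zipped = tf.data.Dataset.zip((a, b))
for num, s in zipped:
    print(num.numpy(), s.numpy())  # 0 b'x', 1 b'y', 2 b'z'
```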
  {
    "library": "tensorflow",
    "name": "get_shapes",
    "source_code": "def get_shapes(tensors):\n    return nest.map_structure(lambda x: x.shape, tensors)",
    "docstring": "Gets shapes from tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:get_shapes arg:tensors arguments arg Return return:yes Call arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "get_session_config_with_timeout",
    "source_code": "def get_session_config_with_timeout(timeout_in_secs, cluster_def):\n    config_proto = config_pb2.ConfigProto(operation_timeout_in_ms=timeout_in_secs, cluster_def=cluster_def)\n    return config_proto",
    "docstring": "Returns a session given a timeout and a cluster configuration.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_system_metadata.py",
    "ast_data": "FunctionDef name:get_session_config_with_timeout arg:timeout_in_secs arg:cluster_def arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "to_clipboard",
    "source_code": "@final\ndef to_clipboard(self, *, excel: bool=True, sep: str | None=None, **kwargs) -> None:\n    from pandas.io import clipboards\n    clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)",
    "docstring": "Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default `xclipxselPyQt4pyperclipindexpyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html)",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:to_clipboard arg:self arguments arg arg arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_with_spec",
    "source_code": "def _merge_with_spec(self, b):\n    a_spec = self._type_spec\n    if not a_spec.is_compatible_with(b):\n        raise ValueError('RowPartition and RowPartitionSpec are not compatible')\n    nrows = constant_op.constant(b.nrows, self.dtype) if b.nrows is not None else self._nrows\n    nvals = constant_op.constant(b.nvals, self.dtype) if b.nvals is not None else self._nvals\n    uniform_row_length = constant_op.constant(b.uniform_row_length, self.dtype) if b.uniform_row_length is not None else self._uniform_row_length\n    return RowPartition(row_splits=self._row_splits, row_lengths=self._row_lengths, value_rowids=self._value_rowids, nvals=nvals, uniform_row_length=uniform_row_length, nrows=nrows, internal=_row_partition_factory_key)",
    "docstring": "Merge with a TypeSpec to create a new RowPartition.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_merge_with_spec arg:self arg:b arguments arg arg Assign If Call Raise Call Assign Compare Call Assign Compare Call Assign Compare Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_child_layer_node_ids",
    "source_code": "def _get_child_layer_node_ids(self, node_id):\n    num_layers = 0\n    child_layers = {}\n    pattern = re.compile('layer-(\\\\d+)')\n    for child in self._proto.nodes[node_id].children:\n        m = pattern.match(child.local_name)\n        if m is None:\n            continue\n        layer_n = int(m.group(1))\n        num_layers = max(layer_n + 1, num_layers)\n        child_layers[layer_n] = child.node_id\n    ordered = []\n    for n in range(num_layers):\n        child = child_layers.get(n)\n        if child is None:\n            break\n        ordered.append(child)\n    return ordered",
    "docstring": "Returns the node ids of each layer in a Sequential/Functional model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_get_child_layer_node_ids arg:self arg:node_id arguments arg arg Assign Assign Assign Call For Assign Call If Compare Assign Call Call Assign Call Assign Assign For Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "sticky_edges",
    "source_code": "@property\ndef sticky_edges(self):\n    return self._sticky_edges",
    "docstring": "`` lists can be modified in place as needed. Examples -------- >>> artist.sticky_edges.x[:] = (xmin, xmax) >>> artist.sticky_edges.y[:] = (ymin, ymax)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:sticky_edges arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "validate_unique",
    "source_code": "def validate_unique(self, exclude=None):\n    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)\n    errors = self._perform_unique_checks(unique_checks)\n    date_errors = self._perform_date_checks(date_checks)\n    for k, v in date_errors.items():\n        errors.setdefault(k, []).extend(v)\n    if errors:\n        raise ValidationError(errors)",
    "docstring": "Check unique constraints on the model and raise ValidationError if any failed.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\base.py",
    "ast_data": "FunctionDef name:validate_unique arg:self arg:exclude arguments arg arg Assign Call Assign Call Assign Call For Call Call Call If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_dump_file_name_to_datum",
    "source_code": "def _dump_file_name_to_datum(self, dir_name, file_name):\n    debug_dump_rel_path = os.path.join(os.path.relpath(dir_name, self._dump_root), file_name)\n    return DebugTensorDatum(self._dump_root, debug_dump_rel_path)",
    "docstring": "Obtain a DebugTensorDatum from the directory and file name. Args: dir_name: () Name of the directory in which the dump file resides. file_name: () Base name of the dump file. Returns: () The loaded from the dump file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_dump_file_name_to_datum arg:self arg:dir_name arg:file_name arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, closure, type_spec):\n    self._closure = closure\n    self._type_spec = type_spec\n    self._values = None\n    self._has_fetched_to_local = False\n    self._has_fetched_to_local_lock = threading.Lock()\n    self._fetched_tensors = None\n    self._error = None\n    self._status_available_event = threading.Event()\n    self._status = remote_value.RemoteValueStatus.NOT_READY",
    "docstring": "Initializes a . Args: closure: The closure from which the is created. type_spec: The type spec for this which is used to trace functions that take this as input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:closure arg:type_spec arguments arg arg arg Assign Assign Assign Assign Assign Call Assign Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "backend_for_gui_framework",
    "source_code": "def backend_for_gui_framework(self, framework):\n    return self._GUI_FRAMEWORK_TO_BACKEND.get(framework.lower())",
    "docstring": "Return the name of the backend corresponding to the specified GUI framework. Parameters ---------- framework : str GUI framework such as \"qt\". Returns ------- str or None Backend name or None if GUI framework not recognised.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "FunctionDef name:backend_for_gui_framework arg:self arg:framework arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "validate_translator",
    "source_code": "def validate_translator(t):\n    if not isinstance(t, str) or len(t) != 256:\n        raise ValueError('The translate argument must be a str of len 256.')",
    "docstring": "Ensure the translator is of the correct length and size.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:validate_translator arg:t arguments arg If BoolOp Call Compare Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state):\n    super().__setstate__(state)\n    if hasattr(self, 'X_thresholds_') and hasattr(self, 'y_thresholds_'):\n        self._build_f(self.X_thresholds_, self.y_thresholds_)",
    "docstring": "Pickle-protocol - set state of the estimator. We need to rebuild the interpolation function.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Call Call If BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "extract_result",
    "source_code": "def extract_result(res):\n    if hasattr(res, '_values'):\n        res = res._values\n        if res.ndim == 1 and len(res) == 1:\n            res = res[0]\n    return res",
    "docstring": "Extract the result object, it might be a 0-dim ndarray or a len-1 0-dim, or a scalar",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\groupby\\ops.py",
    "ast_data": "FunctionDef name:extract_result arg:res arguments arg If Call Assign If BoolOp Compare Compare Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "emit_dispatch_call",
    "source_code": "def emit_dispatch_call(f: NativeFunction, input_base: str, unpacked_args: Sequence[str]) -> str:\n    dispatch_key_set = 'ks & c10::after_autograd_keyset'\n    call = CALL_REDISPATCH.substitute(api_name=cpp.name(f.func, faithful_name_for_out_overloads=True, symint_overload=f.func.has_symint()), unpacked_args=[dispatch_key_set] + list(unpacked_args))\n    return call",
    "docstring": "Dispatch call via function in a namespace or method on Tensor.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_variable_type.py",
    "ast_data": "FunctionDef name:emit_dispatch_call arg:f arg:input_base arg:unpacked_args arguments arg arg arg Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "policy_scope",
    "source_code": "@contextlib.contextmanager\ndef policy_scope(policy):\n    old_policy = _global_policy\n    try:\n        set_global_policy(policy)\n        yield\n    finally:\n        set_global_policy(old_policy)",
    "docstring": "A context manager that sets the global Policy under it. Args: policy: A Policy, or a string that will be converted to a Policy.. Yields: Nothing.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:policy_scope arg:policy arguments arg Assign Try Call Call"
  },
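The save/restore pattern of `policy_scope` in isolation: swap a module-level global, then guarantee restoration in `finally`. The globals below are local stand-ins mirroring the source, not the Keras API itself.

```python
import contextlib

_global_policy = "float32"

def set_global_policy(policy):
    global _global_policy
    _global_policy = policy

@contextlib.contextmanager
def policy_scope(policy):
    old_policy = _global_policy
    try:
        set_global_policy(policy)
        yield
    finally:
        set_global_policy(old_policy)  # restored even if the body raises

with policy_scope("mixed_float16"):
    print(_global_policy)  # mixed_float16
print(_global_policy)      # float32
```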
  {
    "library": "tensorflow",
    "name": "low",
    "source_code": "@property\ndef low(self):\n    return self._low",
    "docstring": "Lower boundary of the output interval.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\uniform.py",
    "ast_data": "FunctionDef name:low arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "global_variables",
    "source_code": "@property\ndef global_variables(self):\n    if self._variables_created:\n        return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, self.variable_scope_name)\n    else:\n        return []",
    "docstring": "Returns the list of global variables created by the Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:global_variables arg:self arguments arg If Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "get_runtime_arg_values",
    "source_code": "@override\ndef get_runtime_arg_values(self, **kwargs) -> list[Any]:\n    return [kwargs[arg.name] for arg in self.get_runtime_arg_info()]",
    "docstring": "Helper method to retrieve runtime args from generate kwargs",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_template.py",
    "ast_data": "FunctionDef name:get_runtime_arg_values arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_embedding_qat_module_mappings",
    "source_code": "def get_embedding_qat_module_mappings() -> dict[Callable, Any]:\n    mapping = copy.deepcopy(DEFAULT_QAT_MODULE_MAPPINGS)\n    mapping[nn.EmbeddingBag] = nnqat.EmbeddingBag\n    mapping[nn.Embedding] = nnqat.Embedding\n    return mapping",
    "docstring": "Get module mapping for quantization aware training This is includes default values in addition to enabling qat for embeddings.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py",
    "ast_data": "FunctionDef name:get_embedding_qat_module_mappings arguments Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_single_device_mesh",
    "source_code": "@classmethod\ndef from_single_device_mesh(cls, mesh: Mesh) -> 'Layout':\n    return cls._new_object(mesh=mesh)",
    "docstring": "Constructs a single device layout from a single device mesh.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py",
    "ast_data": "FunctionDef name:from_single_device_mesh arg:cls arg:mesh arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__exit__",
    "source_code": "def __exit__(self, exc_type, exc_val, exc_tb):\n    map = cherrypy.serving.request.toolmaps.get(self.namespace)\n    if map:\n        for name, settings in map.items():\n            if settings.get('on', False):\n                tool = getattr(self, name)\n                tool._setup()",
    "docstring": "Run tool._setup() for each tool in our toolmap.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_val arg:exc_tb arguments arg arg arg arg Assign Call If For Call If Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "estimate_kernel_num_bytes",
    "source_code": "def estimate_kernel_num_bytes(self):\n    ninplace_args = len(unique(self.args.inplace_buffers.values()))\n    num_bytes = []\n    for i, inp in enumerate(itertools.chain(self.input_nodes, (self.output_node,))):\n        size = V.graph.sizevars.size_hints(inp.get_size())\n        numel = functools.reduce(operator.mul, size, 1)\n        dtype_size = get_dtype_size(inp.get_dtype())\n        num_bytes.append(numel * dtype_size * (1 + int(i < ninplace_args)))\n    return sum(num_bytes)",
    "docstring": "Estimate the total number of bytes this kernel takes. For in/out nodes, sizes are counted twice: once for reading and once for writing.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:estimate_kernel_num_bytes arg:self arguments arg Assign Call Call Call Assign For Call Call Assign Call Call Assign Call Assign Call Call Call Call Compare Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_keywords",
    "source_code": "def get_keywords():\n    git_refnames = '$Format:%d$'\n    git_full = '$Format:%H$'\n    git_date = '$Format:%ci$'\n    keywords = {'refnames': git_refnames, 'full': git_full, 'date': git_date}\n    return keywords",
    "docstring": "Get the keywords needed to look up the version information.",
    "type": "function",
    "file_path": "pandas\\pandas\\_version.py",
    "ast_data": "FunctionDef name:get_keywords arguments Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_parse_flags_tolerate_undef",
    "source_code": "def _parse_flags_tolerate_undef(argv):\n    return flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)",
    "docstring": "Parse args, returning any unknown flags (ABSL defaults to crashing).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\app.py",
    "ast_data": "FunctionDef name:_parse_flags_tolerate_undef arg:argv arguments arg Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_bool",
    "source_code": "def get_bool(self):\n    return self.fdp.ConsumeBool()",
    "docstring": "Consume a bool. Returns: Consumed a bool based on input bytes and constraints.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\security\\fuzzing\\python_fuzzing.py",
    "ast_data": "FunctionDef name:get_bool arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_extract_next_file",
    "source_code": "def _extract_next_file(archive_file: io.BufferedIOBase) -> Iterator[Tuple[str, bytes]]:\n    while True:\n        header = archive_file.read(60)\n        if not header:\n            return\n        elif len(header) < 60:\n            raise RuntimeError('Invalid file header format.')\n        name, _, _, _, _, size, end = struct.unpack('=16s12s6s6s8s10s2s', header)\n        if end != b'`\\n':\n            raise RuntimeError('Invalid file header format.')\n        name = name.decode('ascii').strip()\n        size = int(size, base=10)\n        odd_size = size % 2 == 1\n        if name.startswith('#1/'):\n            filename_size = int(name[3:])\n            name = archive_file.read(filename_size).decode('utf-8').strip(' \\x00')\n            size -= filename_size\n        file_content = archive_file.read(size)\n        if odd_size:\n            archive_file.read(1)\n        yield (name, file_content)",
    "docstring": "Extracts the next available file from the archive. Reads the next available file header section and yields its filename and content in bytes as a tuple. Stops when there are no more available files in the provided archive_file. Args: archive_file: The archive file object, of which cursor is pointing to the next available file header section. Yields: The name and content of the next available file in the given archive file. Raises: RuntimeError: The archive_file is in an unknown format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\ios\\extract_object_files.py",
    "ast_data": "FunctionDef name:_extract_next_file arg:archive_file arguments arg While Assign Call If Return return:no If Compare Call Raise Call Assign Call If Compare Raise Call Assign Call Call Assign Call Assign Compare If Call Assign Call Assign Call Call Call Assign Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_sync_params",
    "source_code": "def _sync_params(self):\n    handles = []\n    for rank in range(self.world_size):\n        handles.extend(self._broadcast_params_from_rank(rank))\n    _ = [x.wait() for x in handles]",
    "docstring": "Sync all parameter shards across the ranks. This rank sends its shard of the parameters to all other ranks and receives a shard from each other rank. This is done using ``and sent parameter-by-parameter otherwise.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_sync_params arg:self arguments arg Assign For Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_canary_import",
    "source_code": "def _canary_import(self) -> None:\n    source_cmds: set[str] = set()\n    for w in self._work_items:\n        if w.source_cmd is not None:\n            source_cmds.add(f'{w.source_cmd} && ')\n    for source_cmd in source_cmds or {''}:\n        cmd = f'{source_cmd}{PYTHON_CMD} -c \"import torch\"'\n        proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8', executable=SHELL)\n        if proc.returncode:\n            raise ImportError(f'Failed to import torch in subprocess: {cmd}\\n{proc.stdout}')",
    "docstring": "Make sure we can import torch before launching a slew of workers.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\execution\\runner.py",
    "ast_data": "FunctionDef name:_canary_import arg:self arguments arg Call For If Compare Call For BoolOp Assign Assign Call If Raise Call"
  },
  {
    "library": "kornia",
    "name": "rgb_to_rgba",
    "source_code": "def rgb_to_rgba(image: Tensor, alpha_val: Union[float, Tensor]) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n    if not isinstance(alpha_val, (float, Tensor)):\n        raise TypeError(f'alpha_val type is not a float or Tensor. Got {type(alpha_val)}')\n    r, g, b = torch.chunk(image, image.shape[-3], dim=-3)\n    a: Tensor = cast(Tensor, alpha_val)\n    if isinstance(alpha_val, float):\n        a = torch.full_like(r, fill_value=float(alpha_val))\n    return torch.cat([r, g, b, a], dim=-3)",
    "docstring": "Convert an image from RGB to RGBA. Args: image: RGB Image to be converted to RGBA of shape :math:. alpha_val (float, Tensor): A float number for the alpha value or a tensor of shape :math:. Returns: RGBA version of the image with shape :math:. .. note:: The current functionality is NOT supported by Torchscript. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_rgba(input, 1.) # 2x4x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:rgb_to_rgba arg:image arg:alpha_val arguments arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call If Call Raise Call Call Assign Call Call If Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "take_indexed_slices_grad",
    "source_code": "def take_indexed_slices_grad(self, num_required, name=None):\n    return_val = gen_data_flow_ops.sparse_accumulator_take_gradient(self._accumulator_ref, num_required, dtype=self._dtype, name=name)\n    return indexed_slices.IndexedSlices(indices=return_val.indices, values=return_val.values, dense_shape=return_val.shape)",
    "docstring": "Attempts to extract the average gradient from the accumulator. The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: An holding the value of the average gradient. Raises: InvalidArgumentError: If < 1",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:take_indexed_slices_grad arg:self arg:num_required arg:name arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "count_up_to",
    "source_code": "@tf_export(v1=['count_up_to'])\n@deprecated(None, 'Prefer Dataset.range instead.')\ndef count_up_to(ref, limit, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.count_up_to(ref, limit=limit, name=name)\n    return gen_state_ops.resource_count_up_to(ref.handle, limit, T=ref.dtype, name=name)",
    "docstring": "Increments 'ref' until it reaches 'limit'. Args: ref: A Variable. Must be one of the following types: , . Should be from a scalar node. limit: An . If incrementing ref would bring it above limit, instead generates an 'OutOfRange' error. name: A name for the operation (optional). Returns: A . Has the same type as . A copy of the input before increment. If nothing else modifies the input, the values produced will all be distinct.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:count_up_to arg:ref arg:limit arg:name arguments arg arg arg If Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for i in range(len(self)):\n        yield self[i]",
    "docstring": "Allow iteration over this LineString.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_apply_gradients",
    "source_code": "def _apply_gradients(self, distribution, grads_and_vars, global_step, name):\n    update_ops = distribution.extended.call_for_each_replica(self._optimizer.apply_gradients, args=(grads_and_vars, global_step, name))\n    return distribution.group(update_ops)",
    "docstring": "Unconditionally apply gradients in cross replica context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:_apply_gradients arg:self arg:distribution arg:grads_and_vars arg:global_step arg:name arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "LegacyTypeSpecBatchEncoder",
    "source_code": "class LegacyTypeSpecBatchEncoder(TypeSpecBatchEncoder):\n\n    def batch(self, type_spec, batch_size):\n        return type_spec._batch(batch_size)\n\n    def unbatch(self, type_spec):\n        return type_spec._unbatch()\n\n    def encode(self, type_spec, value, minimum_rank=0):\n        if minimum_rank == 0:\n            return type_spec._to_tensor_list(value)\n        elif minimum_rank == 1:\n            if not isinstance(type_spec, BatchableTypeSpec):\n                raise ValueError(f'{type_spec.__name__}.encode does not support minimum_rank>0.')\n            return type_spec._to_batched_tensor_list(value)\n        else:\n            raise ValueError(f'{type_spec.__name__}.encode does not support minimum_rank>1.')\n\n    def decode(self, type_spec, encoded_value):\n        return type_spec._from_tensor_list(encoded_value)\n\n    def encoding_specs(self, spec):\n        return spec._flat_tensor_specs",
    "docstring": "TypeSpecBatchEncoder for legacy composite tensor classes. TODO(edloper): Update existing composite tensors to use non-legacy CompositeTensorBatchEncoders.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "ClassDef name:LegacyTypeSpecBatchEncoder FunctionDef name:batch arg:self arg:type_spec arg:batch_size arguments arg arg arg Return return:yes Call FunctionDef name:unbatch arg:self arg:type_spec arguments arg arg Return return:yes Call FunctionDef name:encode arg:self arg:type_spec arg:value arg:minimum_rank arguments arg arg arg arg If Compare Return return:yes Call If Compare If Call Raise Call Return return:yes Call Raise Call FunctionDef name:decode arg:self arg:type_spec arg:encoded_value arguments arg arg arg Return return:yes Call FunctionDef name:encoding_specs arg:self arg:spec arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_check_reflection_axis",
    "source_code": "def _check_reflection_axis(self, reflection_axis):\n    if reflection_axis.shape.ndims is not None and reflection_axis.shape.ndims < 1:\n        raise ValueError('Argument reflection_axis must have at least 1 dimension.  Found: %s' % reflection_axis)",
    "docstring": "Static check of reflection_axis.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_householder.py",
    "ast_data": "FunctionDef name:_check_reflection_axis arg:self arg:reflection_axis arguments arg arg If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "cryptography",
    "name": "generate_private_key",
    "source_code": "@abc.abstractmethod\ndef generate_private_key(self) -> DHPrivateKey:\n    pass",
    "docstring": "Generates and returns a DHPrivateKey.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:generate_private_key arg:self arguments arg"
  },
  {
    "library": "pygame",
    "name": "SysFont",
    "source_code": "def SysFont(name, size, bold=False, italic=False, constructor=None):\n    if constructor is None:\n        constructor = font_constructor\n    initsysfonts()\n    gotbold = gotitalic = False\n    fontname = None\n    if name:\n        if isinstance(name, (str, bytes)):\n            name = name.split(b',' if isinstance(name, bytes) else ',')\n        for single_name in name:\n            if isinstance(single_name, bytes):\n                single_name = single_name.decode()\n            single_name = _simplename(single_name)\n            styles = Sysfonts.get(single_name)\n            if not styles:\n                styles = Sysalias.get(single_name)\n            if styles:\n                plainname = styles.get((False, False))\n                fontname = styles.get((bold, italic))\n                if not (fontname or plainname):\n                    style, fontname = list(styles.items())[0]\n                    if bold and style[0]:\n                        gotbold = True\n                    if italic and style[1]:\n                        gotitalic = True\n                elif not fontname:\n                    fontname = plainname\n                elif plainname != fontname:\n                    gotbold = bold\n                    gotitalic = italic\n            if fontname:\n                break\n    set_bold = set_italic = False\n    if bold and (not gotbold):\n        set_bold = True\n    if italic and (not gotitalic):\n        set_italic = True\n    return constructor(fontname, size, set_bold, set_italic)",
    "docstring": "pygame.font.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font Create a pygame Font from system font resources. This will search the system fonts for the given font name. You can also enable bold or italic styles, and the appropriate system font will be selected if available. This will always return a valid Font object, and will fallback on the builtin pygame font if the given font is not found. Name can also be an iterable of font names, a string of comma-separated font names, or a bytes of comma-separated font names, in which case the set of names will be searched in order. Pygame uses a small set of common font aliases. If the specific font you ask for is not available, a reasonable alternative may be used. If optional constructor is provided, it must be a function with signature constructor(fontpath, size, bold, italic) which returns a Font instance. If None, a pygame.font.Font object is created.",
    "type": "function",
    "file_path": "pygame\\src_py\\sysfont.py",
    "ast_data": "FunctionDef name:SysFont arg:name arg:size arg:bold arg:italic arg:constructor arguments arg arg arg arg arg If Compare Assign Call Assign Assign If If Call Assign Call Call For If Call Assign Call Assign Call Assign Call If Assign Call If Assign Call Assign Call If BoolOp Assign Call Call If BoolOp Assign If BoolOp Assign If Assign If Compare Assign Assign If Assign If BoolOp Assign If BoolOp Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "fixture_dirs",
    "source_code": "@cached_property\ndef fixture_dirs(self):\n    dirs = []\n    fixture_dirs = settings.FIXTURE_DIRS\n    if len(fixture_dirs) != len(set(fixture_dirs)):\n        raise ImproperlyConfigured('settings.FIXTURE_DIRS contains duplicates.')\n    for app_config in apps.get_app_configs():\n        app_label = app_config.label\n        app_dir = os.path.join(app_config.path, 'fixtures')\n        if app_dir in [str(d) for d in fixture_dirs]:\n            raise ImproperlyConfigured(\"'%s' is a default fixture directory for the '%s' app and cannot be listed in settings.FIXTURE_DIRS.\" % (app_dir, app_label))\n        if self.app_label and app_label != self.app_label:\n            continue\n        if os.path.isdir(app_dir):\n            dirs.append(app_dir)\n    dirs.extend(fixture_dirs)\n    dirs.append('')\n    return [os.path.realpath(d) for d in dirs]",
    "docstring": "Return a list of fixture directories. The list contains the 'fixtures' subdirectory of each installed application, if it exists, the directories in FIXTURE_DIRS, and the current directory.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\loaddata.py",
    "ast_data": "FunctionDef name:fixture_dirs arg:self arguments arg Assign Assign If Compare Call Call Call Raise Call For Call Assign Assign Call If Compare Call Raise Call If BoolOp Compare If Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_fused_node",
    "source_code": "def get_fused_node(self, node: BaseSchedulerNode) -> BaseSchedulerNode:\n    return self.name_to_fused_node[node.get_first_name()]",
    "docstring": "Look up the node in Scheduler name_to_fused_node",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:get_fused_node arg:self arg:node arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Callable",
    "source_code": "class Callable(saveable_object.SaveSpec):\n\n    def __init__(self, tensor_callable, dtype, device):\n        super().__init__(tensor_callable, None, None, dtype, device)",
    "docstring": "A callable that represents a Tensor that should be saved to checkpoint. This can be returned from in place of a Tensor. The callable will be executed on the specified device when the checkpoint is about to be written. Any class can use for checkpointing, but for SavedModel export, only resource-type variables* are supported. * must return True.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\tensor_callable.py",
    "ast_data": "ClassDef name:Callable FunctionDef name:__init__ arg:self arg:tensor_callable arg:dtype arg:device arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_current_device_index",
    "source_code": "def get_current_device_index() -> int:\n    if torch.cuda.device_count() > 0:\n        return torch.cuda.current_device()\n    return -1",
    "docstring": "Checks if there are CUDA devices available and returns the device index of the current default CUDA device. Returns -1 in case there are no CUDA devices available. Arguments: ``",
    "type": "function",
    "file_path": "pytorch\\torch\\_utils.py",
    "ast_data": "FunctionDef name:get_current_device_index arguments If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_Callables",
    "source_code": "class _Callables(_Constraint):\n\n    def is_satisfied_by(self, val):\n        return callable(val)\n\n    def __str__(self):\n        return 'a callable'",
    "docstring": "Constraint representing callables.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_Callables FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_sigterm_handler_fn",
    "source_code": "def _sigterm_handler_fn(self, signum, frame):\n    del signum, frame\n    self._maybe_set_received_own_sigterm()",
    "docstring": "Upload the to-be-preempted worker's id to coordination service.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py",
    "ast_data": "FunctionDef name:_sigterm_handler_fn arg:self arg:signum arg:frame arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "warn_deprecated",
    "source_code": "def warn_deprecated(since, *, message='', name='', alternative='', pending=False, obj_type='', addendum='', removal=''):\n    warning = _generate_deprecation_warning(since, message, name, alternative, pending, obj_type, addendum, removal=removal)\n    from . import warn_external\n    warn_external(warning, category=MatplotlibDeprecationWarning)",
    "docstring": "Display a standardized deprecation. Parameters ---------- since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The `` format specifiers will be replaced by the values of the respective arguments passed to this function. name : str, optional The name of the deprecated object. alternative : str, optional An alternative API that the user may use in place of the deprecated API. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. Cannot be used together with *removal*. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. removal : str, optional The expected removal version. With the default (an empty string), a removal version is automatically computed from *since*. Set to other Falsy values to not schedule a removal date. Cannot be used together with *pending*. Examples -------- :: # To warn of the deprecation of \"matplotlib.name_of_module\" warn_deprecated('1.4.0', name='matplotlib.name_of_module', obj_type='module')",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_api\\deprecation.py",
    "ast_data": "FunctionDef name:warn_deprecated arg:since arguments arg arg arg arg arg arg arg arg Assign Call Call"
  },
  {
    "library": "authlib",
    "name": "get_client_secret",
    "source_code": "def get_client_secret(self):\n    raise NotImplementedError()",
    "docstring": "A method to return the client_secret of this client. For instance, the database table has a column called ``:: def get_client_secret(self): return self.client_secret",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_client_secret arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_calculate_null_both",
    "source_code": "def _calculate_null_both(data, statistic, n_permutations, batch, rng=None):\n    n_samples = len(data)\n    n_obs_i = [sample.shape[-1] for sample in data]\n    n_obs_ic = np.cumsum(n_obs_i)\n    n_obs = n_obs_ic[-1]\n    n_max = np.prod([comb(n_obs_ic[i], n_obs_ic[i - 1]) for i in range(n_samples - 1, 0, -1)])\n    if n_permutations >= n_max:\n        exact_test = True\n        n_permutations = n_max\n        perm_generator = _all_partitions_concatenated(n_obs_i)\n    else:\n        exact_test = False\n        perm_generator = (rng.permutation(n_obs) for i in range(n_permutations))\n    batch = batch or int(n_permutations)\n    null_distribution = []\n    data = np.concatenate(data, axis=-1)\n    for indices in _batch_generator(perm_generator, batch=batch):\n        indices = np.array(indices)\n        data_batch = data[..., indices]\n        data_batch = np.moveaxis(data_batch, -2, 0)\n        data_batch = np.split(data_batch, n_obs_ic[:-1], axis=-1)\n        null_distribution.append(statistic(*data_batch, axis=-1))\n    null_distribution = np.concatenate(null_distribution, axis=0)\n    return (null_distribution, n_permutations, exact_test)",
    "docstring": "Calculate null distribution for independent sample tests.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_resampling.py",
    "ast_data": "FunctionDef name:_calculate_null_both arg:data arg:statistic arg:n_permutations arg:batch arg:rng arguments arg arg arg arg arg Assign Call Assign Assign Call Assign Assign Call Call Call If Compare Assign Assign Assign Call Assign Assign Call Call Assign BoolOp Call Assign Assign Call For Call Assign Call Assign Assign Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _assert_float_dtype(_get_dtype(dtype))\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)",
    "docstring": "Returns a tensor object initialized to random normal values. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "ENUM",
    "source_code": "class ENUM:\n\n    def __init__(self, *candidates: str | bool | None) -> None:\n        self._candidates = frozenset(candidates)\n\n    def __repr__(self) -> str:\n        return f'ENUM({', '.join(sorted(map(repr, self._candidates)))})'\n\n    def match(self, value: str | bool | None | Sequence[str | bool | None]) -> bool:\n        if isinstance(value, str | bool | None):\n            return value in self._candidates\n        return all((item in self._candidates for item in value))",
    "docstring": "Represents the candidates which a config value should be one of. Example: app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))",
    "type": "class",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "ClassDef name:ENUM FunctionDef name:__init__ arg:self arguments arg arg Assign Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call FunctionDef name:match arg:self arg:value arguments arg arg If Call Return return:yes Compare Return return:yes Call Compare"
  },
  {
    "library": "scrapy",
    "name": "retrieve_response",
    "source_code": "def retrieve_response(self, spider: Spider, request: Request) -> Response | None:\n    metadata = self._read_meta(spider, request)\n    if metadata is None:\n        return None\n    rpath = Path(self._get_request_path(spider, request))\n    with self._open(rpath / 'response_body', 'rb') as f:\n        body = f.read()\n    with self._open(rpath / 'response_headers', 'rb') as f:\n        rawheaders = f.read()\n    url = metadata['response_url']\n    status = metadata['status']\n    headers = Headers(headers_raw_to_dict(rawheaders))\n    respcls = responsetypes.from_args(headers=headers, url=url, body=body)\n    return respcls(url=url, headers=headers, status=status, body=body)",
    "docstring": "Return response if present in cache, or None otherwise.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\httpcache.py",
    "ast_data": "FunctionDef name:retrieve_response arg:self arg:spider arg:request arguments arg arg arg Assign Call If Compare Return return:no Assign Call Call With Call Assign Call With Call Assign Call Assign Assign Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_inputs",
    "source_code": "@abc.abstractmethod\ndef _inputs(self):\n    raise NotImplementedError(f'{type(self)}._inputs()')",
    "docstring": "Returns a list of the input datasets of the dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_inputs arg:self arguments arg Raise Call Call"
  },
  {
    "library": "django",
    "name": "get_queryset",
    "source_code": "def get_queryset(self):\n    qs = self.model_admin.get_queryset(self.request)\n    qs = qs.complex_filter(self.source_field.get_limit_choices_to())\n    qs, search_use_distinct = self.model_admin.get_search_results(self.request, qs, self.term)\n    if search_use_distinct:\n        qs = qs.distinct()\n    return qs",
    "docstring": "Return queryset based on ModelAdmin.get_search_results().",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\views\\autocomplete.py",
    "ast_data": "FunctionDef name:get_queryset arg:self arguments arg Assign Call Assign Call Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "capture_triton",
    "source_code": "def capture_triton(triton_kernel: Callable, /) -> Any:\n    return wrap_triton(triton_kernel)",
    "docstring": "This API has been renamed to wrap_triton",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\triton.py",
    "ast_data": "FunctionDef name:capture_triton arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_log_level_pairs",
    "source_code": "def get_log_level_pairs(self):\n    return self.log_qname_to_level.items()",
    "docstring": "Returns all qualified module names for which the user requested explicit logging settings. .. warning: This function used to return all loggers, regardless of whether or not the user specified them or not; it now only returns logs which were explicitly mentioned by the user (and torch, which always is implicitly requested when we initialize our logging subsystem.)",
    "type": "method",
    "file_path": "pytorch\\torch\\_logging\\_internal.py",
    "ast_data": "FunctionDef name:get_log_level_pairs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "StatefulSymbolicContext",
    "source_code": "@dataclass(frozen=True)\nclass StatefulSymbolicContext(StatelessSymbolicContext):\n    tensor_source: Source = None\n    shape_env_to_source_to_symbol_cache: dict[int, dict[str, sympy.Expr]] = None\n\n    def __post_init__(self) -> None:\n        super().__post_init__()\n        assert self.tensor_source is not None\n        if not self.shape_env_to_source_to_symbol_cache:\n            object.__setattr__(self, 'shape_env_to_source_to_symbol_cache', {})",
    "docstring": "Create symbols in `` via a symbolic_context determination as given by a cache of Source:Symbol. A cache hit will reuse a stored symbol, and a cache miss will write to this cache. This behaves like StatelessSymbolicContext, except the cache supersedes the other values - dynamic_sizes and constraint_sizes will not be read if we cache hit. It is the cache owner's responsibility to maintain the lifecycle of the cache with respect to different shape_envs, clearing, etc.",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "ClassDef name:StatefulSymbolicContext FunctionDef name:__post_init__ arg:self arguments arg Call Call Compare If Call Call"
  },
  {
    "library": "pytorch",
    "name": "DispatchKeySetVariable",
    "source_code": "class DispatchKeySetVariable(BaseTorchVariable):\n\n    @staticmethod\n    def create(value, **kwargs):\n        return DispatchKeySetVariable(value, **kwargs)\n\n    @classmethod\n    def create_with_source(cls, value, source):\n        install_guard(source.make_guard(GuardBuilder.DISPATCH_KEY_SET_MATCH))\n        return cls(value, source=source)\n\n    def is_constant_fold_method(self, name):\n        return name in ['has']\n\n    def call_method(self, tx, name, args: list[VariableTracker], kwargs: dict[str, VariableTracker]) -> 'VariableTracker':\n        if self.is_constant_fold_method(name) and check_unspec_or_constant_args(args, kwargs):\n            method = getattr(self.value, name)\n            return variables.ConstantVariable.create(method(*[x.as_python_constant() for x in args], **{k: v.as_python_constant() for k, v in kwargs.items()}))\n        elif name == 'highestPriorityTypeId':\n            return variables.EnumVariable(self.value.highestPriorityTypeId())\n        return super().call_method(tx, name, args, kwargs)",
    "docstring": "represents torch.DispatchKeySet",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\torch.py",
    "ast_data": "ClassDef name:DispatchKeySetVariable FunctionDef name:create arg:value arguments arg arg Return return:yes Call FunctionDef name:create_with_source arg:cls arg:value arg:source arguments arg arg arg Call Call Return return:yes Call FunctionDef name:is_constant_fold_method arg:self arg:name arguments arg arg Return return:yes Compare FunctionDef name:call_method arg:self arg:tx arg:name arg:args arg:kwargs arguments arg arg arg arg arg If BoolOp Call Call Assign Call Return return:yes Call Call Call Call Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "convert_to_line_delimits",
    "source_code": "def convert_to_line_delimits(s: str) -> str:\n    if not s[0] == '[' and s[-1] == ']':\n        return s\n    s = s[1:-1]\n    return convert_json_to_lines(s)",
    "docstring": "Helper function that converts JSON lists to line delimited JSON.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\json\\_normalize.py",
    "ast_data": "FunctionDef name:convert_to_line_delimits arg:s arguments arg If BoolOp Compare Compare Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_tensor",
    "source_code": "def set_tensor(self, tensor_index, value):\n    self._interpreter.SetTensor(tensor_index, value)",
    "docstring": "Sets the value of the input tensor. Note this copies data in . If you want to avoid copying, you can use the function to get a numpy buffer pointing to the input buffer in the tflite interpreter. Args: tensor_index: Tensor index of tensor to set. This value can be gotten from the 'index' field in get_input_details. value: Value of tensor to set. Raises: ValueError: If the interpreter could not set the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:set_tensor arg:self arg:tensor_index arg:value arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, onnxfunction: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction):\n    self.onnxfunction = onnxfunction\n    self.param_schema = self.onnxfunction.param_schemas()\n    op_schema = self.onnxfunction.op_schema\n    assert op_schema is not None\n    self.op_schema = op_schema\n    self.type_constraints = {constraint.type_param_str: set(constraint.allowed_type_strs) for constraint in self.op_schema.type_constraints}\n    self.attributes = self.op_schema.attributes\n    self._matching_score: int | None = None",
    "docstring": "Initialize the OnnxSchemaChecker . Args: onnxfunction: The OnnxFunction.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:onnxfunction arguments arg arg Assign Assign Call Assign Compare Assign Assign Call Assign"
  },
  {
    "library": "scipy",
    "name": "ppf",
    "source_code": "def ppf(self, q, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    q, loc, scale = map(asarray, (q, loc, scale))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)\n    cond1 = (0 < q) & (q < 1)\n    cond2 = cond0 & (q == 0)\n    cond3 = cond0 & (q == 1)\n    cond = cond0 & cond1\n    output = np.full(shape(cond), fill_value=self.badvalue)\n    lower_bound = _a * scale + loc\n    upper_bound = _b * scale + loc\n    place(output, cond2, argsreduce(cond2, lower_bound)[0])\n    place(output, cond3, argsreduce(cond3, upper_bound)[0])\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(q,) + args + (scale, loc))\n        scale, loc, goodargs = (goodargs[-2], goodargs[-1], goodargs[:-2])\n        place(output, cond, self._ppf(*goodargs) * scale + loc)\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Percent point function (inverse of ) at q of the given RV. Parameters ---------- q : array_like lower tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : array_like quantile corresponding to the lower tail probability q.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:ppf arg:self arg:q arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Compare Compare Assign Compare Compare Assign Compare Assign Compare Assign Assign Call Call Assign Assign Call Call Call Call If Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_fft_c2c",
    "source_code": "def _fft_c2c(func_name: str, input: TensorLikeType, n: Optional[int], dim: int, norm: NormType, forward: bool) -> TensorLikeType:\n    torch._check(input.dtype.is_complex, lambda: f'{func_name} expects a complex input tensor, but got {input.dtype}')\n    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)\n    dim_size = n if n is not None else input.shape[dim]\n    torch._check(dim_size >= 1, lambda: f'Invalid number of data points ({dim_size}) specified')\n    if n is not None:\n        input = _resize_fft_input(input, dims, (n,))\n    ret = prims.fft_c2c(input, dim=dims, forward=forward)\n    return _apply_norm(ret, norm, dim_size, forward)",
    "docstring": "Common code for performing any complex to complex FFT (fft or ifft)",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\fft.py",
    "ast_data": "FunctionDef name:_fft_c2c arg:func_name arg:input arg:n arg:dim arg:norm arg:forward arguments arg arg arg arg arg arg Call arguments Assign Call Assign Compare Call Compare arguments If Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_trackable_to_cpu",
    "source_code": "def _copy_trackable_to_cpu(self, object_map):\n    if self in object_map:\n        for v in self._variables:\n            v._copy_trackable_to_cpu(object_map)\n    else:\n        copied_vars = []\n        for v in self._variables:\n            v._copy_trackable_to_cpu(object_map)\n            copied_vars.append(object_map[v])\n        new_var = ShardedVariable(copied_vars, name=self.name)\n        object_map[self] = new_var",
    "docstring": "For implementing async checkpointing.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:_copy_trackable_to_cpu arg:self arg:object_map arguments arg arg If Compare For Call Assign For Call Call Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "winter",
    "source_code": "def winter() -> None:\n    set_cmap('winter')",
    "docstring": "Set the colormap to 'winter'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:winter arguments Call"
  },
  {
    "library": "pytorch",
    "name": "base_python_version",
    "source_code": "def base_python_version(self) -> str:\n    return self.python_version(python=self.base_executable)",
    "docstring": "Get the Python version for the base environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:base_python_version arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_to_bcdhw",
    "source_code": "def _to_bcdhw(tensor: Tensor) -> Tensor:\n    if not isinstance(tensor, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(tensor)}')\n    if len(tensor.shape) < 3:\n        raise ValueError(f'Input size must be a three, four or five dimensional tensor. Got {tensor.shape}')\n    if len(tensor.shape) == 3:\n        tensor = tensor.unsqueeze(0)\n    if len(tensor.shape) == 4:\n        tensor = tensor.unsqueeze(0)\n    if len(tensor.shape) > 5:\n        tensor = tensor.view(-1, tensor.shape[-4], tensor.shape[-3], tensor.shape[-2], tensor.shape[-1])\n    return tensor",
    "docstring": "Convert a PyTorch tensor image to BCDHW format. Args: tensor (torch.Tensor): image of the form :math:. Returns: input tensor of the form :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:_to_bcdhw arg:tensor arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Call Assign Call If Compare Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shortcut_string_merge",
    "source_code": "def shortcut_string_merge(self, node_def):\n    device = node_def.device or ''\n    merge_key = (self._spec, device)\n    result = _string_merge_cache.get(merge_key)\n    if result is None:\n        result = self.__call__(node_def).to_string()\n        _string_merge_cache[merge_key] = result\n    return result",
    "docstring": "Merge a node def without materializing a full DeviceSpec object. Often a device merge is invoked in order to generate a string which can be passed into the c api. In such a case, we can cache the node_def.device -> merge_result_string map, and in most cases avoid: - Materializing a copy of self._spec (In the case of DeviceSpecV1) - Materializing a DeviceSpec for node_def.device - A DeviceSpec.merge_from invocation In practice the cache hit rate for this function is very high, because the number of invocations when iterating through the device stack is much larger than the number of devices. Args: node_def: An Operation (or Operation-like) to merge device constraints with self._spec Returns: A string containing the merged device specification.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\device.py",
    "ast_data": "FunctionDef name:shortcut_string_merge arg:self arg:node_def arguments arg arg Assign BoolOp Assign Assign Call If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(device: Optional[_device_t]=None) -> None:\n    _lazy_init()\n    with torch.cuda.device(device):\n        return torch._C._cuda_synchronize()",
    "docstring": "Wait for all kernels in all streams on a CUDA device to complete. Args: device (torch.device or int, optional): device for which to synchronize. It uses the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:synchronize arg:device arguments arg Call With Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "get_shear_matrix2d",
    "source_code": "def get_shear_matrix2d(center: Tensor, sx: Optional[Tensor]=None, sy: Optional[Tensor]=None) -> Tensor:\n    sx = tensor([0.0]).repeat(center.size(0)) if sx is None else sx\n    sy = tensor([0.0]).repeat(center.size(0)) if sy is None else sy\n    x, y = torch.split(center, 1, dim=-1)\n    x, y = (x.view(-1), y.view(-1))\n    sx_tan = tan(sx)\n    sy_tan = tan(sy)\n    ones = ones_like(sx)\n    shear_mat = stack([ones, -sx_tan, sx_tan * y, -sy_tan, ones + sx_tan * sy_tan, sy_tan * (x - sx_tan * y)], dim=-1).view(-1, 2, 3)\n    shear_mat = convert_affinematrix_to_homography(shear_mat)\n    return shear_mat",
    "docstring": "Compose shear matrix Bx4x4 from the components. Note: Ordered shearing, shear x-axis then y-axis. .. math:: \\begin{bmatrix} 1 & b \\\\ a & ab + 1 \\\\ \\end{bmatrix} Args: center: shearing center coordinates of (x, y). sx: shearing angle along x axis in radiants. sy: shearing angle along y axis in radiants Returns: params to be passed to the affine transformation with shape :math:. Examples: >>> rng = torch.manual_seed(0) >>> sx = torch.randn(1) >>> sx tensor([1.5410]) >>> center = torch.tensor([[0., 0.]]) # Bx2 >>> get_shear_matrix2d(center, sx=sx) tensor([[[ 1.0000, -33.5468, 0.0000], [ -0.0000, 1.0000, 0.0000], [ 0.0000, 0.0000, 1.0000]]]) .. note:: This function is often used in conjunction with :func:, :func:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py",
    "ast_data": "FunctionDef name:get_shear_matrix2d arg:center arg:sx arg:sy arguments arg arg arg Assign Compare Call Call Call Assign Compare Call Call Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__add__",
    "source_code": "def __add__(self, other):\n    return add(self, other)",
    "docstring": "Return (self + other), that is string concatenation, element-wise for a pair of array_likes of str or unicode. See Also -------- add",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:__add__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_safe_name_scope",
    "source_code": "def _to_safe_name_scope(signature_key: str, user_input_name: str):\n    name_scope = '{}_{}'.format(signature_key, user_input_name)\n    if re.match('^[A-Za-z0-9.][A-Za-z0-9_.\\\\\\\\-]*$', name_scope):\n        return name_scope\n    invalid_prefix_stripped = re.sub('^[^A-Za-z0-9.]*', '', name_scope)\n    return re.sub('[^A-Za-z0-9_.\\\\\\\\-]', '_', invalid_prefix_stripped)",
    "docstring": "Creates a sanitized name scope from user signature and input names. Concatenates signature and input names, sanitizing as needed to be a valid scope name. Args: signature_key: The user-provided key for the signature. user_input_name: The user-provided name for the input placeholder. Returns: A name scope that is safe to be used in tf.name_scope().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_to_safe_name_scope arg:signature_key arg:user_input_name arguments arg arg Assign Call If Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "as_fake",
    "source_code": "def as_fake(self):\n    return self.__variable.as_proxy().node.meta['example_value']",
    "docstring": "Returns a \"fake\" value (either a FakeTensor or a SymInt) representing the variable in question. This only works for variables that denote Tensor or int. You can use this to query metadata; e.g., v.as_fake().size(0) will tell you the compile-time known size of the tensor. WARNING: Do NOT mutate the returned tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:as_fake arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_write_bytecode",
    "source_code": "@tf_export('mlir.experimental.write_bytecode')\ndef experimental_write_bytecode(filename, mlir_txt):\n    pywrap_mlir.experimental_write_bytecode(filename, mlir_txt)",
    "docstring": "Writes an MLIR module out as bytecode. Args: filename: The filename to write to. mlir_txt: The MLIR module in textual format.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py",
    "ast_data": "FunctionDef name:experimental_write_bytecode arg:filename arg:mlir_txt arguments arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_linewidth",
    "source_code": "def set_linewidth(self, lw):\n    if lw is None:\n        lw = self._get_default_linewidth()\n    self._us_lw = np.atleast_1d(lw)\n    self._linewidths, self._linestyles = self._bcast_lwls(self._us_lw, self._us_linestyles)\n    self.stale = True",
    "docstring": "Set the linewidth(s) for the collection. *lw* can be a scalar or a sequence; if it is a sequence the patches will cycle through the sequence Parameters ---------- lw : float or list of floats",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_linewidth arg:self arg:lw arguments arg arg If Compare Assign Call Assign Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "StringTable",
    "source_code": "class StringTable(object):\n\n    def __init__(self):\n        self._string_table = ['']\n        self._string_to_index = {'': 0}\n\n    def index_of(self, value_str):\n        if value_str is None:\n            value_str = ''\n        if value_str in self._string_to_index:\n            return self._string_to_index[value_str]\n        index = len(self._string_table)\n        self._string_table.append(value_str)\n        self._string_to_index[value_str] = index\n        return index\n\n    def next_index(self):\n        return len(self._string_table)\n\n    def string_table(self):\n        return self._string_table",
    "docstring": "Keeps track of strings to add to string_table in pprof proto.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "ClassDef name:StringTable FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:index_of arg:self arg:value_str arguments arg arg If Compare Assign If Compare Return return:yes Assign Call Call Assign Return return:yes FunctionDef name:next_index arg:self arguments arg Return return:yes Call FunctionDef name:string_table arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, rate, validate_args=False, allow_nan_stats=True, name='Exponential'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[rate]) as name:\n        self._rate = ops.convert_to_tensor(rate, name='rate')\n    super(Exponential, self).__init__(concentration=array_ops.ones([], dtype=self._rate.dtype), rate=self._rate, allow_nan_stats=allow_nan_stats, validate_args=validate_args, name=name)\n    self._parameters = parameters\n    self._graph_parents += [self._rate]",
    "docstring": "Construct Exponential distribution with parameter . Args: rate: Floating point tensor, equivalent to . Must contain only positive values. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\exponential.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:rate arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg Assign Call Call With Call Assign Call Call Call Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_text",
    "source_code": "def get_text(self):\n    return self._text.get_text()",
    "docstring": "Return the string representation of this area's text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_text arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_lookup_dependency",
    "source_code": "def _lookup_dependency(self, name, cached_dependencies=None):\n    if cached_dependencies:\n        return cached_dependencies.get(name)\n    return self._self_unconditional_dependency_names.get(name)",
    "docstring": "Look up a dependency by name. May be overridden to include conditional dependencies. Args: name: The local name of the dependency. cached_dependencies: Optional dict containing all computed dependencies returned by . Returns: A object, or if no dependency by this name was found.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_lookup_dependency arg:self arg:name arg:cached_dependencies arguments arg arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "joint_bilateral_blur",
    "source_code": "def joint_bilateral_blur(input: Tensor, guidance: Tensor, kernel_size: tuple[int, int] | int, sigma_color: float | Tensor, sigma_space: tuple[float, float] | Tensor, border_type: str='reflect', color_distance_type: str='l1') -> Tensor:\n    return _bilateral_blur(input, guidance, kernel_size, sigma_color, sigma_space, border_type, color_distance_type)",
    "docstring": "Blur a tensor using a Joint Bilateral filter. .. image:: _static/img/joint_bilateral_blur.png This operator is almost identical to a Bilateral filter. The only difference is that the color Gaussian kernel is computed based on another image called a guidance image. See :func: for more information. Arguments: input: the input tensor with shape :math:. guidance: the guidance tensor with shape :math:. kernel_size: the size of the kernel. sigma_color: the standard deviation for intensity/color Gaussian kernel. Smaller values preserve more edges. sigma_space: the standard deviation for spatial Gaussian kernel. This is similar to `gaussian_blur2d()(B, C, H, W)`. Examples: >>> input = torch.rand(2, 4, 5, 5) >>> guidance = torch.rand(2, 4, 5, 5) >>> output = joint_bilateral_blur(input, guidance, (3, 3), 0.1, (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\bilateral.py",
    "ast_data": "FunctionDef name:joint_bilateral_blur arg:input arg:guidance arg:kernel_size arg:sigma_color arg:sigma_space arg:border_type arg:color_distance_type arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_hc_cut",
    "source_code": "def _hc_cut(n_clusters, children, n_leaves):\n    if n_clusters > n_leaves:\n        raise ValueError(f'Cannot extract more clusters than samples: {n_clusters} clusters were given for a tree with {n_leaves} leaves.')\n    nodes = [-(max(children[-1]) + 1)]\n    for _ in range(n_clusters - 1):\n        these_children = children[-nodes[0] - n_leaves]\n        heappush(nodes, -these_children[0])\n        heappushpop(nodes, -these_children[1])\n    label = np.zeros(n_leaves, dtype=np.intp)\n    for i, node in enumerate(nodes):\n        label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i\n    return label",
    "docstring": "Function cutting the ward tree for a given number of clusters. Parameters ---------- n_clusters : int or ndarray The number of clusters to form. children : ndarray of shape (n_nodes-1, 2) The children of each non-leaf node. Values less than correspond to leaves of the tree which are the original samples. A node greater than or equal to is a non-leaf node and has children . Alternatively at the i-th iteration, children[i][0] and children[i][1] are merged to form node . n_leaves : int Number of leaves of the tree. Returns ------- labels : array [n_samples] Cluster labels for each point.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py",
    "ast_data": "FunctionDef name:_hc_cut arg:n_clusters arg:children arg:n_leaves arguments arg arg arg If Compare Raise Call Assign Call For Call Assign Call Call Assign Call For Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "database_backwards",
    "source_code": "def database_backwards(self, app_label, schema_editor, from_state, to_state):\n    raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')",
    "docstring": "Perform the mutation on the database schema in the reverse direction - e.g. if this were CreateModel, it would in fact drop the model's table.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\operations\\base.py",
    "ast_data": "FunctionDef name:database_backwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "is_set",
    "source_code": "@property\ndef is_set(self):\n    return capi.is_field_set(self._feat.ptr, self._index)",
    "docstring": "Return True if the value of this field isn't null, False otherwise.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:is_set arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_tensor_like",
    "source_code": "def create_tensor_like(creation_fn):\n\n    def _constant_like(x, *, dtype=None, device=None, layout=None, pin_memory=False, memory_format=None):\n        assert_nyi(not pin_memory, 'pin_memory')\n        assert_nyi(layout in (None, torch.strided), f'layout={layout}')\n        if dtype is None:\n            dtype = x.get_dtype()\n        else:\n            dtype = decode_dtype(dtype)\n        device = device or x.get_device()\n        size = list(x.get_size())\n        return creation_fn(size, dtype=dtype, device=device, layout=layout, pin_memory=pin_memory)\n    return _constant_like",
    "docstring": "Shim to convert X_like(...) into X(...). For example zeros_like() into zeros().",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:create_tensor_like arg:creation_fn arguments arg FunctionDef name:_constant_like arg:x arguments arg arg arg arg arg arg Call Call Compare If Compare Assign Call Assign Call Assign BoolOp Call Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "to_torch_tensor",
    "source_code": "def to_torch_tensor(nd_tensor):\n    if nd_tensor.dtype == 'bool':\n        return torch.from_numpy(nd_tensor.numpy())\n    return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())",
    "docstring": "A helper function to transfer a NDArray to torch.tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\tvm.py",
    "ast_data": "FunctionDef name:to_torch_tensor arg:nd_tensor arguments arg If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_sweep_poly_phase",
    "source_code": "def _sweep_poly_phase(t, poly):\n    intpoly = polyint(poly)\n    phase = 2 * pi * polyval(intpoly, t)\n    return phase",
    "docstring": "Calculate the phase used by sweep_poly to generate its output. See for a description of the arguments.",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_waveforms.py",
    "ast_data": "FunctionDef name:_sweep_poly_phase arg:t arg:poly arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "handle_field",
    "source_code": "def handle_field(self, obj, field):\n    raise NotImplementedError('subclasses of Serializer must provide a handle_field() method')",
    "docstring": "Called to handle each individual (non-relational) field on an object.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\base.py",
    "ast_data": "FunctionDef name:handle_field arg:self arg:obj arg:field arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "XinSheYang04",
    "source_code": "class XinSheYang04(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = -1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = sum(sin(x) ** 2)\n        v = sum(x ** 2)\n        w = sum(sin(sqrt(abs(x))) ** 2)\n        return (u - exp(-v)) * exp(-w)",
    "docstring": "Xin-She Yang 4 objective function. This class defines the Xin-She Yang 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{XinSheYang04}}(x) = \\left[ \\sum_{i=1}^{n} \\sin^2(x_i) - e^{-\\sum_{i=1}^{n} x_i^2} \\right ] e^{-\\sum_{i=1}^{n} \\sin^2 \\sqrt{ \\lvert x_i \\rvert }} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_X.py",
    "ast_data": "ClassDef name:XinSheYang04 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Assign Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "invert_yaxis",
    "source_code": "def invert_yaxis(self):\n    self.yaxis.set_inverted(not self.yaxis.get_inverted())",
    "docstring": "[*Discouraged*] Invert the y-axis. .. admonition:: Discouraged The use of this method is discouraged. Use instead. See Also -------- get_yinverted get_ylim, set_ylim get_ybound, set_ybound",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:invert_yaxis arg:self arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "reshape_tensor_complex",
    "source_code": "def reshape_tensor_complex(tensor: torch.Tensor) -> torch.Tensor:\n    *initial_dims, last_dim = tensor.shape\n    if last_dim % 2 != 0:\n        raise AssertionError('The size of the last dimension must be even to reshape it to [..., last_dim/2, 2]')\n    new_shape = (*initial_dims, last_dim // 2, 2)\n    reshaped_tensor = tensor.view(new_shape)\n    return reshaped_tensor",
    "docstring": "Reshape tensor from [*initial_dims, last_dim] to *initial_dims, last_dim/2, 2]",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\decomposition.py",
    "ast_data": "FunctionDef name:reshape_tensor_complex arg:tensor arguments arg Assign If Compare Raise Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "my_fact",
    "source_code": "@tf_export(v1=['user_ops.my_fact'])\ndef my_fact():\n    return _gen_user_ops.fact()",
    "docstring": "Example of overriding the generated code for an Op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\user_ops\\user_ops.py",
    "ast_data": "FunctionDef name:my_fact arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs):\n    super().__init__(reduction=reduction, name=name)\n    self.fn = fn\n    self._fn_kwargs = kwargs",
    "docstring": "Initializes class. Args: fn: The loss function to wrap, with signature . reduction: Type of to apply to loss. Default value is . indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to . When used with , outside of built-in training loops such as and , using or will raise an error. Please see this custom training [tutorial]( for more details. name: Optional name for the instance. **kwargs: The keyword arguments that are passed on to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fn arg:reduction arg:name arguments arg arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "GlorotNormal",
    "source_code": "class GlorotNormal(VarianceScaling):\n\n    def __init__(self, seed=None):\n        super(GlorotNormal, self).__init__(scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed}",
    "docstring": "The Glorot normal initializer, also called Xavier normal initializer. Also available via the shortcut function . Draws samples from a truncated normal distribution centered on 0 with where is the number of input units in the weight tensor and is the number of output units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.GlorotNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.GlorotNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [Glorot et al., 2010]( ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:GlorotNormal FunctionDef name:__init__ arg:self arg:seed arguments arg arg Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_LiteFuncCall",
    "source_code": "class _LiteFuncCall:\n\n    def __init__(self):\n        self.inputs = {}\n        self.outputs = {}\n        self.function_name = None\n        self.uuid = None\n        self.params = {}\n        self.level = -1\n        self.children_inputs_mappings = {}\n\n    def flattened_inputs_and_outputs(self):\n\n        def _flatten(input_or_output_dict):\n            flattened_items = []\n            for item in input_or_output_dict.values():\n                flattened_items.extend(item.flatten())\n            return flattened_items\n        return (_flatten(self.inputs), _flatten(self.outputs))\n\n    def __str__(self):\n\n        def format_args(items):\n            s = ''\n            for idx, item in items.iteritems():\n                s += '\\t\\t%d:\\n' % idx + str(item)\n            return s\n        inputs_str = '\\tInputs\\n' + format_args(self.inputs)\n        outputs_str = '\\tOutputs\\n' + format_args(self.outputs)\n        return 'tflite function %s call %s level %d \\n\\tinputs:\\n\\t\\t%s\\n\\toutputs:\\n\\t\\t%s' % (self.function_name, self.uuid, self.level, inputs_str, outputs_str)",
    "docstring": "Represent a TensorFlow Lite custom function. This is uses to accumulate found hints in the graphdef into a single conceptual unit. Attributes: inputs: inputs to the op (hash from index # to argument) outputs: outputs to the op (hash from index # to argument) function_name: the tflite custom op name to use uuid: a unique call id for this particular call (i.e. multiple function calls would have the same function_name but different uuids. params: A param name to key value for op constant data. I.e. for axis on a reduction, strides on a convolution, etc. level: Level of the OpHint. children_inputs_mappings: If the Ophint has children, children inputs mappings indicate how their inputs & outputs are mapped.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "ClassDef name:_LiteFuncCall FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign Assign Assign FunctionDef name:flattened_inputs_and_outputs arg:self arguments arg FunctionDef name:_flatten arg:input_or_output_dict arguments arg Assign For Call Call Call Return return:yes Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg FunctionDef name:format_args arg:items arguments arg Assign For Call Call Return return:yes Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_inv_call",
    "source_code": "def _inv_call(self, y):\n    if self._cache_size == 0:\n        return self._inverse(y)\n    x_old, y_old = self._cached_x_y\n    if y is y_old:\n        return x_old\n    x = self._inverse(y)\n    self._cached_x_y = (x, y)\n    return x",
    "docstring": "Inverts the transform .",
    "type": "method",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "FunctionDef name:_inv_call arg:self arg:y arguments arg arg If Compare Return return:yes Call Assign If Compare Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_fitstart",
    "source_code": "def _fitstart(self, data, args=None):\n    if args is None:\n        args = (1.0,) * self.numargs\n    loc, scale = self._fit_loc_scale_support(data, *args)\n    return args + (loc, scale)",
    "docstring": "Starting point for fit (shape arguments + loc + scale).",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_fitstart arg:self arg:data arg:args arguments arg arg arg If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "fun",
    "source_code": "def fun(self, x):\n    raise NotImplementedError",
    "docstring": "Evaluate residuals at point . Parameters ---------- x : ndarray, shape (n,) Point of evaluation. Returns ------- ndarray, shape (m,) Vector of residuals at point .",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py",
    "ast_data": "FunctionDef name:fun arg:self arg:x arguments arg arg Raise"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self):\n    return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)",
    "docstring": "Clone this coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "tick_params",
    "source_code": "def tick_params(self, axis='both', **kwargs):\n    for ax in self.figure.axes:\n        ax.tick_params(axis=axis, **kwargs)\n    return self",
    "docstring": "Modify the ticks, tick labels, and gridlines. Parameters ---------- axis : {'x', 'y', 'both'} The axis on which to apply the formatting. kwargs : keyword arguments Additional keyword arguments to pass to :meth:. Returns ------- self : Grid instance Returns self for easy chaining.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:tick_params arg:self arg:axis arguments arg arg arg For Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "open_file_cm",
    "source_code": "def open_file_cm(path_or_file, mode='r', encoding=None):\n    fh, opened = to_filehandle(path_or_file, mode, True, encoding)\n    return fh if opened else contextlib.nullcontext(fh)",
    "docstring": "Pass through file objects and context-manage path-likes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:open_file_cm arg:path_or_file arg:mode arg:encoding arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_assemble_sparse_jacobian",
    "source_code": "def _assemble_sparse_jacobian(self, J_eq, J_ineq, s):\n    n_vars, n_ineq, n_eq = (self.n_vars, self.n_ineq, self.n_eq)\n    J_aux = sps.vstack([J_eq, J_ineq], 'csr')\n    indptr, indices, data = (J_aux.indptr, J_aux.indices, J_aux.data)\n    new_indptr = indptr + np.hstack((np.zeros(n_eq, dtype=int), np.arange(n_ineq + 1, dtype=int)))\n    size = indices.size + n_ineq\n    new_indices = np.empty(size)\n    new_data = np.empty(size)\n    mask = np.full(size, False, bool)\n    mask[new_indptr[-n_ineq:] - 1] = True\n    new_indices[mask] = n_vars + np.arange(n_ineq)\n    new_indices[~mask] = indices\n    new_data[mask] = s\n    new_data[~mask] = data\n    J = sps.csr_array((new_data, new_indices, new_indptr), (n_eq + n_ineq, n_vars + n_ineq))\n    return J",
    "docstring": "Assemble sparse Jacobian given its components. Given `` returns: jacobian = [ J_eq, 0 ] [ J_ineq, diag(s) ] It is equivalent to: sps.bmat([[ J_eq, None ], [ J_ineq, diag(s) ]], \"csr\") but significantly more efficient for this given structure.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py",
    "ast_data": "FunctionDef name:_assemble_sparse_jacobian arg:self arg:J_eq arg:J_ineq arg:s arguments arg arg arg arg Assign Assign Call Assign Assign Call Call Call Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "pop_obj",
    "source_code": "def pop_obj(self) -> T:\n    return self._stack.pop().obj",
    "docstring": "Remove last-inserted object and return it, without filename/line info.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:pop_obj arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_data_transform",
    "source_code": "def get_data_transform(self):\n    return artist.Artist.get_transform(self)",
    "docstring": "Return the mapping data coordinates to physical coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_data_transform arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "add_outputs",
    "source_code": "def add_outputs(self, *args, **kwargs):\n    if 'names' in kwargs:\n        return [self._outputs.add(arg, name=name) for arg, name in zip(args, kwargs['names'])]\n    else:\n        return [self._outputs.add(arg) for arg in args]",
    "docstring": "Add a sequence of outputs to the function invocation. Args: *args: List of outputs to be converted (should be tf.Tensor). **kwargs: See Returns: Wrapped outputs (identity standins that have additional metadata). These are also tf.Tensor's.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\op_hint.py",
    "ast_data": "FunctionDef name:add_outputs arg:self arguments arg arg arg If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "reproject_disparity_to_3D",
    "source_code": "def reproject_disparity_to_3D(disparity_tensor: Tensor, Q_matrix: Tensor) -> Tensor:\n    _check_Q_matrix(Q_matrix)\n    _check_disparity_tensor(disparity_tensor)\n    batch_size, rows, cols, _ = disparity_tensor.shape\n    dtype = disparity_tensor.dtype\n    device = disparity_tensor.device\n    uv = create_meshgrid(rows, cols, normalized_coordinates=False, device=device, dtype=dtype)\n    uv = uv.expand(batch_size, -1, -1, -1)\n    v, u = torch.unbind(uv, dim=-1)\n    v, u = (torch.unsqueeze(v, -1), torch.unsqueeze(u, -1))\n    uvd = stack((u, v, disparity_tensor), 1).reshape(batch_size, 3, -1).permute(0, 2, 1)\n    points = transform_points(Q_matrix, uvd).reshape(batch_size, rows, cols, 3)\n    if not points.shape == (batch_size, rows, cols, 3):\n        raise StereoException(f'Something went wrong in `reproject_disparity_to_3D`. Expected the final outputto be of shape {(batch_size, rows, cols, 3)}.But the computed point cloud had shape {points.shape}. Please ensure input are correct. If this is an error, please submit an issue.')\n    return points",
    "docstring": "Reproject the disparity tensor to a 3D point cloud. Args: disparity_tensor: Disparity tensor of shape :math:. Q_matrix: Tensor of Q matrices of shapes :math:. Returns: The 3D point cloud of shape :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:reproject_disparity_to_3D arg:disparity_tensor arg:Q_matrix arguments arg arg Call Call Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Assign Call Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "while_body",
    "source_code": "def while_body(i, *ta_list):\n    inputs = [x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs]\n    op_outputs = _create_op(pfor_input.op_type, inputs, output_dtypes, attrs=pfor_input.op.node_def.attr).outputs\n    outputs = []\n    for out, ta in zip(op_outputs, ta_list):\n        assert isinstance(out, tensor_lib.Tensor)\n        outputs.append(ta.write(i, out))\n    return tuple([i + 1] + outputs)",
    "docstring": "Body of while loop.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:while_body arg:i arguments arg arg Assign Assign Call Assign For Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "spidercls_for_request",
    "source_code": "def spidercls_for_request(spider_loader: SpiderLoaderProtocol, request: Request, default_spidercls: type[Spider] | None=None, log_none: bool=False, log_multiple: bool=False) -> type[Spider] | None:\n    snames = spider_loader.find_by_request(request)\n    if len(snames) == 1:\n        return spider_loader.load(snames[0])\n    if len(snames) > 1 and log_multiple:\n        logger.error('More than one spider can handle: %(request)s - %(snames)s', {'request': request, 'snames': ', '.join(snames)})\n    if len(snames) == 0 and log_none:\n        logger.error('Unable to find spider that handles: %(request)s', {'request': request})\n    return default_spidercls",
    "docstring": "Return a spider class that handles the given Request. This will look for the spiders that can handle the given request (using the spider loader) and return a Spider class if (and only if) there is only one Spider able to handle the Request. If multiple spiders (or no spider) are found, it will return the default_spidercls passed. It can optionally log if multiple or no spiders are found.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\spider.py",
    "ast_data": "FunctionDef name:spidercls_for_request arg:spider_loader arg:request arg:default_spidercls arg:log_none arg:log_multiple arguments arg arg arg arg arg Assign Call If Compare Call Return return:yes Call If BoolOp Compare Call Call Call If BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, mu=None, kappa=1, seed=None):\n    self._dist = vonmises_fisher_gen(seed)\n    self.dim, self.mu, self.kappa = self._dist._process_parameters(mu, kappa)",
    "docstring": "Create a frozen von Mises-Fisher distribution. Parameters ---------- mu : array_like, default: None Mean direction of the distribution. kappa : float, default: 1 Concentration parameter. Must be positive. seed : {None, int, , }, optional If is None (or ), the singleton is used. If is an int, a new `seedseed` instance then that instance is used.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:mu arg:kappa arg:seed arguments arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_tie_averaged_dcg",
    "source_code": "def _tie_averaged_dcg(y_true, y_score, discount_cumsum):\n    _, inv, counts = np.unique(-y_score, return_inverse=True, return_counts=True)\n    ranked = np.zeros(len(counts))\n    np.add.at(ranked, inv, y_true)\n    ranked /= counts\n    groups = np.cumsum(counts) - 1\n    discount_sums = np.empty(len(counts))\n    discount_sums[0] = discount_cumsum[groups[0]]\n    discount_sums[1:] = np.diff(discount_cumsum[groups])\n    return (ranked * discount_sums).sum()",
    "docstring": "Compute DCG by averaging over possible permutations of ties. The gain () of an index falling inside a tied group (in the order induced by ) is replaced by the average gain within this group. The discounted gain for a tied group is then the average within this group times the sum of discounts of the corresponding ranks. This amounts to averaging scores for all possible orderings of the tied groups. (note in the case of dcg@k the discount is 0 after index k) Parameters ---------- y_true : ndarray The true relevance scores. y_score : ndarray Predicted scores. discount_cumsum : ndarray Precomputed cumulative sum of the discounts. Returns ------- discounted_cumulative_gain : float The discounted cumulative gain. References ---------- McSherry, F., & Najork, M. (2008, March). Computing information retrieval performance measures efficiently in the presence of tied scores. In European conference on information retrieval (pp. 414-421). Springer, Berlin, Heidelberg.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_ranking.py",
    "ast_data": "FunctionDef name:_tie_averaged_dcg arg:y_true arg:y_score arg:discount_cumsum arguments arg arg arg Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "gen_new_seed",
    "source_code": "def gen_new_seed(seed, salt):\n    if seed is None:\n        return None\n    string = (str(seed) + salt).encode('utf-8')\n    return int(hashlib.md5(string).hexdigest()[:8], 16) & 2147483647",
    "docstring": "Generate a new seed, from the given seed and salt.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:gen_new_seed arg:seed arg:salt arguments arg arg If Compare Return return:no Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self) -> func_graph_module.FuncGraph:\n    if self._cached_graph:\n        return self._cached_graph\n    if not self._generated_graph:\n        self._generated_graph = to_func_graph(self)\n    return self._generated_graph",
    "docstring": "Returns a FuncGraph corresponding to the AtomicFunction.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg If Return return:yes If Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "StateDictOptions",
    "source_code": "@dataclass\nclass StateDictOptions:\n    full_state_dict: bool = False\n    cpu_offload: bool = False\n    ignore_frozen_params: bool = False\n    keep_submodule_prefixes: bool = True\n    strict: bool = True\n    broadcast_from_rank0: bool = False\n    flatten_optimizer_state_dict: bool = False\n    dsd_fqn_modifiers: str = '_fqn_modifiers'",
    "docstring": "This dataclass specifies how get_state_dict/set_state_dict will work. - `` must be set to True when using this option. This option currently only supports DTensor, not the legacy ShardedTensor.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "ClassDef name:StateDictOptions"
  },
  {
    "library": "pytorch",
    "name": "FileListerIterDataPipe",
    "source_code": "@functional_datapipe('list_files')\nclass FileListerIterDataPipe(IterDataPipe[str]):\n\n    def __init__(self, root: Union[str, Sequence[str], IterDataPipe]='.', masks: Union[str, list[str]]='', *, recursive: bool=False, abspath: bool=False, non_deterministic: bool=False, length: int=-1) -> None:\n        super().__init__()\n        if isinstance(root, str):\n            root = [root]\n        if not isinstance(root, IterDataPipe):\n            root = IterableWrapperIterDataPipe(root)\n        self.datapipe: IterDataPipe = root\n        self.masks: Union[str, list[str]] = masks\n        self.recursive: bool = recursive\n        self.abspath: bool = abspath\n        self.non_deterministic: bool = non_deterministic\n        self.length: int = length\n\n    def __iter__(self) -> Iterator[str]:\n        for path in self.datapipe:\n            yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic)\n\n    def __len__(self):\n        if self.length == -1:\n            raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")\n        return self.length",
    "docstring": "Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. Multiple root directories can be provided (functional name: ``, the results yielded from each root directory will be sorted length: Nominal length of the datapipe Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import FileLister >>> dp = FileLister(root=\".\", recursive=True) >>> list(dp) ['example.py', './data/data.tar']",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\filelister.py",
    "ast_data": "ClassDef name:FileListerIterDataPipe FunctionDef name:__init__ arg:self arg:root arg:masks arguments arg arg arg arg arg arg arg Call Call If Call Assign If Call Assign Call FunctionDef name:__iter__ arg:self arguments arg For Call FunctionDef name:__len__ arg:self arguments arg If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject",
    "source_code": "def insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject(gm: torch.fx.GraphModule, param_node: torch.fx.Node) -> tuple[torch.fx.Node, Optional[torch.fx.Node]]:\n    mod = get_script_object(gm, param_node)\n    w_qtensor, b_qtensor = mod.unpack()\n    w_attr_name, b_attr_name = (f'dequantized_{param_node.target}_w', f'dequantized_{param_node.target}_b')\n    return insert_weight_and_bias_get_attr_node(gm, w_qtensor, b_qtensor, w_attr_name, b_attr_name)",
    "docstring": "Directly inline tensor from a get_attr fx node.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\replace_quantized_ops_with_standard_ops_pass.py",
    "ast_data": "FunctionDef name:insert_weight_and_bias_get_attr_node_from_get_attr_to_scriptobject arg:gm arg:param_node arguments arg arg Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "fetch_access_token",
    "source_code": "def fetch_access_token(self, url=None, **kwargs):\n    return self.fetch_token(url, **kwargs)",
    "docstring": "Alias for fetch_token.",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\requests_client\\oauth2_session.py",
    "ast_data": "FunctionDef name:fetch_access_token arg:self arg:url arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_unmask_cipher_token",
    "source_code": "def _unmask_cipher_token(token):\n    mask = token[:CSRF_SECRET_LENGTH]\n    token = token[CSRF_SECRET_LENGTH:]\n    chars = CSRF_ALLOWED_CHARS\n    pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in mask))\n    return ''.join((chars[x - y] for x, y in pairs))",
    "docstring": "Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length CSRF_TOKEN_LENGTH, and that its first half is a mask), use it to decrypt the second half to produce the original secret.",
    "type": "function",
    "file_path": "django\\django\\middleware\\csrf.py",
    "ast_data": "FunctionDef name:_unmask_cipher_token arg:token arguments arg Assign Assign Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "@property\ndef handle(self):\n    tpu_context = tpu_util.enclosing_tpu_context()\n    if tpu_context is None or context.executing_eagerly():\n        var = self._get_on_device_or_primary()\n        if isinstance(var, packed.PackedVarAndDevice):\n            return var.on_device_handle()\n        else:\n            return var.handle\n    else:\n        is_packed = self._packed_var is not None\n        val = self._values\n        if is_packed:\n            val = [self._packed_var]\n        return tpu_context.get_replicated_var_handle(self._common_name, self._handle_id, val, self._is_mirrored(), is_packed)",
    "docstring": "The handle by which this variable can be accessed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py",
    "ast_data": "FunctionDef name:handle arg:self arguments arg Assign Call If BoolOp Compare Call Assign Call If Call Return return:yes Call Return return:yes Assign Compare Assign If Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "css_classes",
    "source_code": "def css_classes(self, extra_classes=None):\n    if hasattr(extra_classes, 'split'):\n        extra_classes = extra_classes.split()\n    extra_classes = set(extra_classes or [])\n    if self.errors and hasattr(self.form, 'error_css_class'):\n        extra_classes.add(self.form.error_css_class)\n    if self.field.required and hasattr(self.form, 'required_css_class'):\n        extra_classes.add(self.form.required_css_class)\n    return ' '.join(extra_classes)",
    "docstring": "Return a string of space-separated CSS classes for this field.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:css_classes arg:self arg:extra_classes arguments arg arg If Call Assign Call Assign Call BoolOp If BoolOp Call Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_mixed_precision_enabled_for_buffers",
    "source_code": "def _mixed_precision_enabled_for_buffers(self) -> bool:\n    return self.mixed_precision.buffer_dtype is not None",
    "docstring": "Return whether the user explicitly enabled buffer mixed precision. NOTE: Unlike parameters and gradient reduction, buffer mixed precision is applied at the FSDP instance level, not the `` level, which may be different for the composable code path.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_mixed_precision_enabled_for_buffers arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "record_choice",
    "source_code": "@staticmethod\ndef record_choice(multi_kernel_name: str, picked_kernel_name: str):\n    from torch._inductor.graph import GraphLowering\n    if not isinstance(V.graph, GraphLowering):\n        return\n    if not V.graph.record_multi_kernel_choice:\n        return\n    V.graph.multi_kernel_to_choice[multi_kernel_name] = picked_kernel_name",
    "docstring": "Record the multi-kernel choice for cpp-wrapper after autotuning We should do nothing if this function is not called during codegen.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py",
    "ast_data": "FunctionDef name:record_choice arg:multi_kernel_name arg:picked_kernel_name arguments arg arg If Call Return return:no If Return return:no Assign"
  },
  {
    "library": "django",
    "name": "_check_save_as",
    "source_code": "def _check_save_as(self, obj):\n    if not isinstance(obj.save_as, bool):\n        return must_be('a boolean', option='save_as', obj=obj, id='admin.E101')\n    else:\n        return []",
    "docstring": "Check save_as is a boolean.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_save_as arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "trainable_weights",
    "source_code": "@property\ndef trainable_weights(self):\n    if self.trainable:\n        children_weights = self._gather_children_attribute('trainable_variables')\n        return self._dedup_weights(self._trainable_weights + children_weights)\n    else:\n        return []",
    "docstring": "List of all trainable weights tracked by this layer. Trainable weights are updated via gradient descent during training. Returns: A list of trainable variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:trainable_weights arg:self arguments arg If Assign Call Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "identity",
    "source_code": "def identity(module, name):\n    Identity.apply(module, name)\n    return module",
    "docstring": "Apply pruning reparametrization without pruning any units. Applies pruning reparametrization to the tensor corresponding to the parameter called `` on which pruning will act. Returns: module (nn.Module): modified (i.e. pruned) version of the input module Examples: >>> # xdoctest: +SKIP >>> m = prune.identity(nn.Linear(2, 3), 'bias') >>> print(m.bias_mask) tensor([1., 1., 1.])",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:identity arg:module arg:name arguments arg arg Call Return return:yes"
  },
  {
    "library": "django",
    "name": "ErrorDict",
    "source_code": "class ErrorDict(dict, RenderableErrorMixin):\n    template_name = 'django/forms/errors/dict/default.html'\n    template_name_text = 'django/forms/errors/dict/text.txt'\n    template_name_ul = 'django/forms/errors/dict/ul.html'\n\n    def __init__(self, *args, renderer=None, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.renderer = renderer or get_default_renderer()\n\n    def as_data(self):\n        return {f: e.as_data() for f, e in self.items()}\n\n    def get_json_data(self, escape_html=False):\n        return {f: e.get_json_data(escape_html) for f, e in self.items()}\n\n    def get_context(self):\n        return {'errors': self.items(), 'error_class': 'errorlist'}",
    "docstring": "A collection of errors that knows how to display itself in various formats. The dictionary keys are the field names, and the values are the errors.",
    "type": "class",
    "file_path": "django\\django\\forms\\utils.py",
    "ast_data": "ClassDef name:ErrorDict Assign Assign Assign FunctionDef name:__init__ arg:self arguments arg arg arg arg Call Call Assign BoolOp Call FunctionDef name:as_data arg:self arguments arg Return return:yes Call Call FunctionDef name:get_json_data arg:self arg:escape_html arguments arg arg Return return:yes Call Call FunctionDef name:get_context arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._types[self.num]",
    "docstring": "Return a short-hand string form of the OGR Geometry type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "hat",
    "source_code": "@staticmethod\ndef hat(v: Tensor) -> Tensor:\n    check_v_shape(v)\n    upsilon = stack((v[..., 0], v[..., 1]), -1)\n    theta = v[..., 2]\n    col0 = concatenate((So2.hat(theta), upsilon.unsqueeze(-2)), -2)\n    return pad(col0, (0, 1))",
    "docstring": "Convert elements from vector space to lie algebra. Returns matrix of shape :math:. Args: v: vector of shape:math:. Example: >>> theta = torch.tensor(3.1415/2) >>> So2.hat(theta) tensor([[0.0000, 1.5707], [1.5707, 0.0000]])",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py",
    "ast_data": "FunctionDef name:hat arg:v arguments arg Call Assign Call Assign Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "as_complex",
    "source_code": "def as_complex(real, imag=0):\n    return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag)))",
    "docstring": "Return object as COMPLEX expression (complex literal constant).",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_complex arg:real arg:imag arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "confidence_interval",
    "source_code": "def confidence_interval(self, confidence_level=0.95, *, method='linear'):\n    message = 'Confidence interval bounds do not implement a `confidence_interval` method.'\n    if self._n is None:\n        raise NotImplementedError(message)\n    methods = {'linear': self._linear_ci, 'log-log': self._loglog_ci}\n    message = f'`method` must be one of {set(methods)}.'\n    if method.lower() not in methods:\n        raise ValueError(message)\n    message = '`confidence_level` must be a scalar between 0 and 1.'\n    confidence_level = np.asarray(confidence_level)[()]\n    if confidence_level.shape or not 0 <= confidence_level <= 1:\n        raise ValueError(message)\n    method_fun = methods[method.lower()]\n    low, high = method_fun(confidence_level)\n    message = 'The confidence interval is undefined at some observations. This is a feature of the mathematical formula used, not an error in its implementation.'\n    if np.any(np.isnan(low) | np.isnan(high)):\n        warnings.warn(message, RuntimeWarning, stacklevel=2)\n    low, high = (np.clip(low, 0, 1), np.clip(high, 0, 1))\n    low = EmpiricalDistributionFunction(self.quantiles, low, None, None, self._kind)\n    high = EmpiricalDistributionFunction(self.quantiles, high, None, None, self._kind)\n    return ConfidenceInterval(low, high)",
    "docstring": "Compute a confidence interval around the CDF/SF point estimate Parameters ---------- confidence_level : float, default: 0.95 Confidence level for the computed confidence interval method : str, {\"linear\", \"log-log\"} Method used to compute the confidence interval. Options are \"linear\" for the conventional Greenwood confidence interval (default) and \"log-log\" for the \"exponential Greenwood\", log-negative-log-transformed confidence interval. Returns ------- ci : `~scipy.stats._result_classes.EmpiricalDistributionFunction`) as described in [1]_. The conventional Greenwood formula can result in lower confidence limits less than 0 and upper confidence limits greater than 1; these are clipped to the unit interval. NaNs may be produced by either method; these are features of the formulas. References ---------- .. [1] Sawyer, Stanley. \"The Greenwood and Exponential Greenwood Confidence Intervals in Survival Analysis.\"",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_survival.py",
    "ast_data": "FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg arg Assign If Compare Raise Call Assign Assign Call If Compare Call Raise Call Assign Assign Call If BoolOp Compare Raise Call Assign Call Assign Call Assign If Call Call Call Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "check_paired_arrays",
    "source_code": "def check_paired_arrays(X, Y):\n    X, Y = check_pairwise_arrays(X, Y)\n    if X.shape != Y.shape:\n        raise ValueError('X and Y should be of same shape. They were respectively %r and %r long.' % (X.shape, Y.shape))\n    return (X, Y)",
    "docstring": "Set X and Y appropriately and checks inputs for paired distances. All paired distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the dimensions of the two arrays are equal. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) Returns ------- safe_X : {array-like, sparse matrix} of shape (n_samples_X, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\pairwise.py",
    "ast_data": "FunctionDef name:check_paired_arrays arg:X arg:Y arguments arg arg Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "OutlierMixin",
    "source_code": "class OutlierMixin:\n    _estimator_type = 'outlier_detector'\n\n    def __sklearn_tags__(self):\n        tags = super().__sklearn_tags__()\n        tags.estimator_type = 'outlier_detector'\n        return tags\n\n    def fit_predict(self, X, y=None, **kwargs):\n        if _routing_enabled():\n            transform_params = self.get_metadata_routing().consumes(method='predict', params=kwargs.keys())\n            if transform_params:\n                warnings.warn(f\"This object ({self.__class__.__name__}) has a `predict` method which consumes metadata, but `fit_predict` does not forward metadata to `predict`. Please implement a custom `fit_predict` method to forward metadata to `predict` as well.Alternatively, you can explicitly do `set_predict_request`and set all values to `False` to disable metadata routed to `predict`, if that's an option.\", UserWarning)\n        return self.fit(X, **kwargs).predict(X)",
    "docstring": "Mixin class for all outlier detection estimators in scikit-learn. This mixin defines the following functionality: - set estimator type to through the tag; - method that default to and . Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, OutlierMixin >>> class MyEstimator(OutlierMixin): ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.ones(shape=len(X)) >>> estimator = MyEstimator() >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> estimator.fit_predict(X) array([1., 1., 1.])",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "ClassDef name:OutlierMixin Assign FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Return return:yes FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg arg If Call Assign Call Call Call If Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "create_authorization_verifier",
    "source_code": "def create_authorization_verifier(self, request):\n    raise NotImplementedError()",
    "docstring": "Create and bind ``",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:create_authorization_verifier arg:self arg:request arguments arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "get_title",
    "source_code": "def get_title(self):\n    return self._legend_title_box._text",
    "docstring": "Return the instance for the legend title.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend.py",
    "ast_data": "FunctionDef name:get_title arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "@tf_export('io.gfile.join')\ndef join(path, *paths):\n    path_ = compat.as_str_any(compat.path_to_str(path))\n    if '://' in path_[1:]:\n        return urljoin(path, *paths)\n    return os.path.join(path, *paths)",
    "docstring": "Join one or more path components intelligently. TensorFlow specific filesystems will be joined like a url (using \"/\" as the path seperator) on all platforms: On Windows or Linux/Unix-like: >>> tf.io.gfile.join(\"gcs://folder\", \"file.py\") 'gcs://folder/file.py' >>> tf.io.gfile.join(\"ram://folder\", \"file.py\") 'ram://folder/file.py' But the native filesystem is handled just like os.path.join: >>> path = tf.io.gfile.join(\"folder\", \"file.py\") >>> if os.name == \"nt\": ... expected = \"folder\\\\file.py\" # Windows ... else: ... expected = \"folder/file.py\" # Linux/Unix-like >>> path == expected True Args: path: string, path to a directory paths: string, additional paths to concatenate Returns: path: the joined path.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:join arg:path arguments arg arg Assign Call Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, low=0.0, high=1.0, validate_args=False, allow_nan_stats=True, name='Uniform'):\n    parameters = dict(locals())\n    with ops.name_scope(name, values=[low, high]) as name:\n        with ops.control_dependencies([check_ops.assert_less(low, high, message='uniform not defined when low >= high.')] if validate_args else []):\n            self._low = array_ops.identity(low, name='low')\n            self._high = array_ops.identity(high, name='high')\n            check_ops.assert_same_float_dtype([self._low, self._high])\n    super(Uniform, self).__init__(dtype=self._low.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._low, self._high], name=name)",
    "docstring": "Initialize a batch of Uniform distributions. Args: low: Floating point tensor, lower boundary of the output interval. Must have and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\uniform.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:low arg:high arg:validate_args arg:allow_nan_stats arg:name arguments arg arg arg arg arg arg Assign Call Call With Call With Call Call Assign Call Assign Call Call Call Call Call"
  },
  {
    "library": "authlib",
    "name": "get_default_redirect_uri",
    "source_code": "def get_default_redirect_uri(self):\n    raise NotImplementedError()",
    "docstring": "A method to get client default redirect_uri. For instance, the database table for client has a column called ``:: def get_default_redirect_uri(self): return self.default_redirect_uri :return: A URL string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py",
    "ast_data": "FunctionDef name:get_default_redirect_uri arg:self arguments arg Raise Call"
  },
  {
    "library": "sphinx",
    "name": "_extract_zip",
    "source_code": "def _extract_zip(filename: Path, target_dir: Path, /) -> None:\n    ensuredir(target_dir)\n    with ZipFile(filename) as archive:\n        for name in archive.namelist():\n            if name.endswith('/'):\n                continue\n            entry = target_dir / name\n            ensuredir(entry.parent)\n            entry.write_bytes(archive.read(name))",
    "docstring": "Extract zip file to target directory.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\theming.py",
    "ast_data": "FunctionDef name:_extract_zip arguments arg arg Call With Call For Call If Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "self_cpu_time_total",
    "source_code": "@property\ndef self_cpu_time_total(self):\n    self._ensure_function_events()\n    assert self._function_events is not None\n    return self._function_events.self_cpu_time_total",
    "docstring": "Returns total time spent on CPU. The total time is a sum of all self times across all the events.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:self_cpu_time_total arg:self arguments arg Call Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "TimerQT",
    "source_code": "class TimerQT(TimerBase):\n\n    def __init__(self, *args, **kwargs):\n        self._timer = QtCore.QTimer()\n        self._timer.timeout.connect(self._on_timer)\n        super().__init__(*args, **kwargs)\n\n    def __del__(self):\n        if not _isdeleted(self._timer):\n            self._timer_stop()\n\n    def _timer_set_single_shot(self):\n        self._timer.setSingleShot(self._single)\n\n    def _timer_set_interval(self):\n        self._timer.setInterval(self._interval)\n\n    def _timer_start(self):\n        self._timer.start()\n\n    def _timer_stop(self):\n        self._timer.stop()",
    "docstring": "Subclass of using QTimer events.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_qt.py",
    "ast_data": "ClassDef name:TimerQT FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Call Call Call FunctionDef name:__del__ arg:self arguments arg If Call Call FunctionDef name:_timer_set_single_shot arg:self arguments arg Call FunctionDef name:_timer_set_interval arg:self arguments arg Call FunctionDef name:_timer_start arg:self arguments arg Call FunctionDef name:_timer_stop arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "fy",
    "source_code": "@property\ndef fy(self) -> Tensor:\n    return self._params[..., 1]",
    "docstring": "Returns the focal length in y direction.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:fy arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_roll_vectorized",
    "source_code": "def _roll_vectorized(M, roll_indices, axis):\n    assert axis in [0, 1]\n    ndim = M.ndim\n    assert ndim == 3\n    ndim_roll = roll_indices.ndim\n    assert ndim_roll == 1\n    sh = M.shape\n    r, c = sh[-2:]\n    assert sh[0] == roll_indices.shape[0]\n    vec_indices = np.arange(sh[0], dtype=np.int32)\n    M_roll = np.empty_like(M)\n    if axis == 0:\n        for ir in range(r):\n            for ic in range(c):\n                M_roll[:, ir, ic] = M[vec_indices, (-roll_indices + ir) % r, ic]\n    else:\n        for ir in range(r):\n            for ic in range(c):\n                M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices + ic) % c]\n    return M_roll",
    "docstring": "Roll an array of matrices along *axis* (0: rows, 1: columns) according to an array of indices *roll_indices*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_roll_vectorized arg:M arg:roll_indices arg:axis arguments arg arg arg Compare Assign Compare Assign Compare Assign Assign Compare Assign Call Assign Call If Compare For Call For Call Assign For Call For Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "validate",
    "source_code": "def validate(self, value, model_instance):\n    if not self.editable:\n        return\n    if self.choices is not None and value not in self.empty_values:\n        for option_key, option_value in self.choices:\n            if isinstance(option_value, (list, tuple)):\n                for optgroup_key, optgroup_value in option_value:\n                    if value == optgroup_key:\n                        return\n            elif value == option_key:\n                return\n        raise exceptions.ValidationError(self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value})\n    if value is None and (not self.null):\n        raise exceptions.ValidationError(self.error_messages['null'], code='null')\n    if not self.blank and value in self.empty_values:\n        raise exceptions.ValidationError(self.error_messages['blank'], code='blank')",
    "docstring": "Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:validate arg:self arg:value arg:model_instance arguments arg arg arg If Return return:no If BoolOp Compare Compare For If Call For If Compare Return return:no If Compare Return return:no Raise Call If BoolOp Compare Raise Call If BoolOp Compare Raise Call"
  },
  {
    "library": "pandas",
    "name": "_vector_divlike_op",
    "source_code": "def _vector_divlike_op(self, other, op) -> np.ndarray | Self:\n    result = op(self._ndarray, np.asarray(other))\n    if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [operator.truediv, operator.floordiv]:\n        return type(self)._simple_new(result, dtype=result.dtype)\n    if op in [operator.floordiv, roperator.rfloordiv]:\n        mask = self.isna() | isna(other)\n        if mask.any():\n            result = result.astype(np.float64)\n            np.putmask(result, mask, np.nan)\n    return result",
    "docstring": "Shared logic for __truediv__, __floordiv__, and their reversed versions with timedelta64-dtype ndarray other.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py",
    "ast_data": "FunctionDef name:_vector_divlike_op arg:self arg:other arg:op arguments arg arg arg Assign Call Call If BoolOp BoolOp Call Call Compare Return return:yes Call Call If Compare Assign Call Call If Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TypeExemplars",
    "source_code": "class TypeExemplars:\n    TYPE_EXEMPLARS: dict[str, Any] = {CustomGraphPass.__name__: DummyPass(), torch.fx.graph.Graph.__name__: torch.fx.graph.Graph(), BaseSchedulerNode.__name__: BaseSchedulerNode(None)}\n\n    @staticmethod\n    def example(t: type[T]) -> Optional[T]:\n        return TypeExemplars.TYPE_EXEMPLARS.get(t.__name__, None)\n\n    @staticmethod\n    def contains(t: type[T]) -> bool:\n        return t.__name__ in TypeExemplars.TYPE_EXEMPLARS",
    "docstring": "This class returns examples of a Type, given its class name.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\fuzzer.py",
    "ast_data": "ClassDef name:TypeExemplars Call Call Call FunctionDef name:example arg:t arguments arg Return return:yes Call FunctionDef name:contains arg:t arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "greater",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef greater(x, y):\n    return math_ops.greater(x, y)",
    "docstring": "Element-wise truth value of (x > y). Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:greater arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "@staticmethod\ndef _process_parameters(row, col):\n    r = np.array(row, dtype=np.int64, copy=True)\n    c = np.array(col, dtype=np.int64, copy=True)\n    if np.ndim(r) != 1:\n        raise ValueError('`row` must be one-dimensional')\n    if np.ndim(c) != 1:\n        raise ValueError('`col` must be one-dimensional')\n    if np.any(r < 0):\n        raise ValueError('each element of `row` must be non-negative')\n    if np.any(c < 0):\n        raise ValueError('each element of `col` must be non-negative')\n    n = np.sum(r)\n    if n != np.sum(c):\n        raise ValueError('sums over `row` and `col` must be equal')\n    if not np.all(r == np.asarray(row)):\n        raise ValueError('each element of `row` must be an integer')\n    if not np.all(c == np.asarray(col)):\n        raise ValueError('each element of `col` must be an integer')\n    return (r, c, n)",
    "docstring": "Check that row and column vectors are one-dimensional, that they do not contain negative or non-integer entries, and that the sums over both vectors are equal.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:row arg:col arguments arg arg Assign Call Assign Call If Compare Call Raise Call If Compare Call Raise Call If Call Compare Raise Call If Call Compare Raise Call Assign Call If Compare Call Raise Call If Call Compare Call Raise Call If Call Compare Call Raise Call Return return:yes"
  },
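A self-contained mirror of the `_process_parameters` checks above, using only numpy (`check_margins` is a hypothetical name, not SciPy API); it shows which inputs pass validation:

```python
import numpy as np

def check_margins(row, col):
    # Mirror of the checks: 1-D, non-negative, integral, equal sums.
    r = np.array(row, dtype=np.int64, copy=True)
    c = np.array(col, dtype=np.int64, copy=True)
    if r.ndim != 1 or c.ndim != 1:
        raise ValueError("`row` and `col` must be one-dimensional")
    if (r < 0).any() or (c < 0).any():
        raise ValueError("entries must be non-negative")
    if not (np.all(r == np.asarray(row)) and np.all(c == np.asarray(col))):
        raise ValueError("entries must be integers")
    n = r.sum()
    if n != c.sum():
        raise ValueError("sums over `row` and `col` must be equal")
    return r, c, n

print(check_margins([2, 3], [1, 4]))  # (array([2, 3]), array([1, 4]), 5)
```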
  {
    "library": "tensorflow",
    "name": "_GetCheckpointFilename",
    "source_code": "def _GetCheckpointFilename(save_dir, latest_filename):\n    if latest_filename is None:\n        latest_filename = 'checkpoint'\n    return os.path.join(save_dir, latest_filename)",
    "docstring": "Returns a filename for storing the CheckpointState. Args: save_dir: The directory for saving and restoring checkpoints. latest_filename: Name of the file in 'save_dir' that is used to store the CheckpointState. Returns: The path of the file that contains the CheckpointState proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:_GetCheckpointFilename arg:save_dir arg:latest_filename arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, *system):\n    super().__init__(*system)",
    "docstring": "Initialize the baseclass. The heavy lifting is done by the subclasses.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_variable",
    "source_code": "def _create_variable(self, next_creator, **kwargs):\n    colocate_with = kwargs.pop('colocate_with', None)\n    if colocate_with is None:\n        devices = self._devices\n    elif isinstance(colocate_with, numpy_dataset.SingleDevice):\n        with ops.device(colocate_with.device):\n            return next_creator(**kwargs)\n    else:\n        devices = colocate_with._devices\n\n    def _real_mirrored_creator(**kwargs):\n        value_list = []\n        for i, d in enumerate(devices):\n            with ops.device(d):\n                kwargs['initial_value'] = self._get_variable_creator_initial_value(replica_id=i, device=d, primary_var=value_list[0] if value_list else None, **kwargs)\n                if i > 0:\n                    var0name = value_list[0].name.split(':')[0]\n                    kwargs['name'] = '%s/replica_%d/' % (var0name, i)\n                with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n                    with record.stop_recording():\n                        v = next_creator(**kwargs)\n                assert not isinstance(v, values.DistributedVariable)\n                value_list.append(v)\n        return value_list\n    return distribute_utils.create_mirrored_variable(self._container_strategy(), _real_mirrored_creator, distribute_utils.VARIABLE_CLASS_MAPPING, distribute_utils.VARIABLE_POLICY_MAPPING, **kwargs)",
    "docstring": "Create a mirrored variable. See .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_create_variable arg:self arg:next_creator arguments arg arg arg Assign Call If Compare Assign If Call With Call Return return:yes Call Assign FunctionDef name:_real_mirrored_creator arguments arg Assign For Call With Call Assign Call If Compare Assign Call Assign With Call With Call Assign Call Call Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_pad",
    "source_code": "def get_pad(self):\n    return self._base_pad",
    "docstring": "Get the value of the tick label pad in points.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_pad arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_has_aliases",
    "source_code": "@property\ndef _has_aliases(self) -> bool:\n    return is_list_like(self.header)",
    "docstring": "Whether the aliases for column names are present.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\excel.py",
    "ast_data": "FunctionDef name:_has_aliases arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "_md5sum",
    "source_code": "def _md5sum(file: IO[bytes]) -> str:\n    m = hashlib.md5()\n    while True:\n        d = file.read(8096)\n        if not d:\n            break\n        m.update(d)\n    return m.hexdigest()",
    "docstring": "Calculate the md5 checksum of a file-like object without reading its whole content in memory. >>> from io import BytesIO >>> _md5sum(BytesIO(b'file content to hash')) '784406af91dd5a54fbb9c84c2236595a'",
    "type": "function",
    "file_path": "scrapy\\scrapy\\pipelines\\files.py",
    "ast_data": "FunctionDef name:_md5sum arg:file arguments arg Assign Call While Assign Call If Call Return return:yes Call"
  },
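A runnable copy of the chunked-hashing pattern from `_md5sum`, verified against hashing the whole buffer at once (the expected digest comes from the docstring's doctest):

```python
import hashlib
from io import BytesIO

def md5sum(file):
    # Chunked hashing: constant memory regardless of input size.
    m = hashlib.md5()
    while True:
        chunk = file.read(8096)
        if not chunk:
            break
        m.update(chunk)
    return m.hexdigest()

data = b"file content to hash"
assert md5sum(BytesIO(data)) == hashlib.md5(data).hexdigest()
print(md5sum(BytesIO(data)))  # 784406af91dd5a54fbb9c84c2236595a
```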
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, fn, name=None, dtype=None, **kwargs):\n    super(SumOverBatchSizeMetricWrapper, self).__init__(name=name, dtype=dtype)\n    self._fn = fn\n    self._fn_kwargs = kwargs",
    "docstring": "Creates a instance. Args: fn: The metric function to wrap, with signature . name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: The keyword arguments that are passed on to .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fn arg:name arg:dtype arguments arg arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_constraint_violation_fn",
    "source_code": "def _constraint_violation_fn(self, x):\n    S = np.size(x) // self.parameter_count\n    _out = np.zeros((S, self.total_constraints))\n    offset = 0\n    for con in self._wrapped_constraints:\n        c = con.violation(x.T).T\n        if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S):\n            raise RuntimeError('An array returned from a Constraint has the wrong shape. If `vectorized is False` the Constraint should return an array of shape (M,). If `vectorized is True` then the Constraint must return an array of shape (M, S), where S is the number of solution vectors and M is the number of constraint components in a given Constraint object.')\n        c = np.reshape(c, (S, con.num_constr))\n        _out[:, offset:offset + con.num_constr] = c\n        offset += con.num_constr\n    return _out",
    "docstring": "Calculates total constraint violation for all the constraints, for a set of solutions. Parameters ---------- x : ndarray Solution vector(s). Has shape (S, N), or (N,), where S is the number of solutions to investigate and N is the number of parameters. Returns ------- cv : ndarray Total violation of constraints. Has shape ``, where M is the total number of constraint components (which is not necessarily equal to len(self._wrapped_constraints)).",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_constraint_violation_fn arg:self arg:x arguments arg arg Assign Call Assign Call Assign For Assign Call If BoolOp Compare BoolOp Compare Compare Raise Call Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "variance",
    "source_code": "@abstractmethod\ndef variance(self, *, method):\n    raise NotImplementedError()",
    "docstring": "Variance (central second moment) Parameters ---------- method : {None, 'formula', 'transform', 'normalize', 'quadrature', 'cache'} Method used to calculate the central second moment. Not all methods are available for all distributions. See for details. See Also -------- moment standard_deviation mean References ---------- .. [1] Variance, *Wikipedia*, Examples -------- Instantiate a distribution with the desired parameters: >>> from scipy import stats >>> X = stats.Normal(mu=1., sigma=2.) Evaluate the variance: >>> X.variance() 4.0 >>> X.variance() == X.moment(order=2, kind='central') == X.sigma**2 True",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_probability_distribution.py",
    "ast_data": "FunctionDef name:variance arg:self arguments arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "bracket_minimum",
    "source_code": "def bracket_minimum(f, xm0, *, xl0=None, xr0=None, xmin=None, xmax=None, factor=None, args=(), maxiter=1000):\n    res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args, maxiter=maxiter)\n    res.bracket = (res.xl, res.xm, res.xr)\n    res.f_bracket = (res.fl, res.fm, res.fr)\n    del res.xl\n    del res.xm\n    del res.xr\n    del res.fl\n    del res.fm\n    del res.fr\n    return res",
    "docstring": "Bracket the minimum of a unimodal, real-valued function of a real variable. For each element of the output of , seeks the scalar bracket points `ffxffxscipy.optimize.OptimizeResultfxmin = f(xm)`scipy.optimize.bracketscipy.optimize.bracketxlxrwxminxmaxwfactorxmaxwxmaxxmaxxmaxbracket_miniumbracket_minimumfind_minimum` at once: >>> import numpy as np >>> c = np.asarray([1, 1.5, 2]) >>> res_bracket = elementwise.bracket_minimum(f, 0, args=(c,)) >>> res_bracket.bracket (array([0. , 0.5, 0.5]), array([0.5, 1.5, 1.5]), array([1.5, 2.5, 2.5])) >>> res_minimum = elementwise.find_minimum(f, res_bracket.bracket, args=(c,)) >>> res_minimum.x array([1.00000001, 1.5 , 2. ]) >>> res_minimum.f_x array([2., 2., 2.])",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_elementwise.py",
    "ast_data": "FunctionDef name:bracket_minimum arg:f arg:xm0 arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "register_endpoint",
    "source_code": "def register_endpoint(self, endpoint):\n    if isinstance(endpoint, type):\n        endpoint = endpoint(self)\n    else:\n        endpoint.server = self\n    endpoints = self._endpoints.setdefault(endpoint.ENDPOINT_NAME, [])\n    endpoints.append(endpoint)",
    "docstring": "Add extra endpoint to authorization server. e.g. RevocationEndpoint:: authorization_server.register_endpoint(RevocationEndpoint) :param endpoint_cls: A endpoint class or instance.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py",
    "ast_data": "FunctionDef name:register_endpoint arg:self arg:endpoint arguments arg arg If Call Assign Call Assign Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_ticks_position",
    "source_code": "def _get_ticks_position(self):\n    representative_ticks = []\n    if not isinstance(self.get_major_locator(), NullLocator):\n        representative_ticks.append(self.majorTicks[0])\n    if not isinstance(self.get_minor_locator(), NullLocator):\n        representative_ticks.append(self.minorTicks[0])\n    if all((tick.tick1line.get_visible() and (not tick.tick2line.get_visible()) and tick.label1.get_visible() and (not tick.label2.get_visible()) for tick in representative_ticks)):\n        return 1\n    elif all((tick.tick2line.get_visible() and (not tick.tick1line.get_visible()) and tick.label2.get_visible() and (not tick.label1.get_visible()) for tick in representative_ticks)):\n        return 2\n    elif all((tick.tick1line.get_visible() and tick.tick2line.get_visible() and tick.label1.get_visible() and (not tick.label2.get_visible()) for tick in representative_ticks)):\n        return 'default'\n    else:\n        return 'unknown'",
    "docstring": "Helper for and . Check the visibility of tick1line, label1, tick2line, and label2 on the first major and the first minor ticks, provided these ticks are used i.e. the corresponding locator is not a NullLocator, and return - 1 if only tick1line and label1 are visible (which corresponds to \"bottom\" for the x-axis and \"left\" for the y-axis); - 2 if only tick2line and label2 are visible (which corresponds to \"top\" for the x-axis and \"right\" for the y-axis); - \"default\" if only tick1line, tick2line and label1 are visible; - \"unknown\" otherwise.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_get_ticks_position arg:self arguments arg Assign If Call Call Call If Call Call Call If Call BoolOp Call Call Call Call Return return:yes If Call BoolOp Call Call Call Call Return return:yes If Call BoolOp Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "AdminAuthenticationForm",
    "source_code": "class AdminAuthenticationForm(AuthenticationForm):\n    error_messages = {**AuthenticationForm.error_messages, 'invalid_login': _('Please enter the correct %(username)s and password for a staff account. Note that both fields may be case-sensitive.')}\n    required_css_class = 'required'\n\n    def confirm_login_allowed(self, user):\n        super().confirm_login_allowed(user)\n        if not user.is_staff:\n            raise ValidationError(self.error_messages['invalid_login'], code='invalid_login', params={'username': self.username_field.verbose_name})",
    "docstring": "A custom authentication form used in the admin app.",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\forms.py",
    "ast_data": "ClassDef name:AdminAuthenticationForm Assign Call Assign FunctionDef name:confirm_login_allowed arg:self arg:user arguments arg arg Call Call If Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_subshape",
    "source_code": "def _is_subshape(left, right):\n    if right.dims is None:\n        return True\n    if left.ndims != right.ndims:\n        return False\n    for ldim, rdim in zip(left.dims, right.dims):\n        if rdim.value is not None and ldim.value != rdim.value:\n            return False\n    return True",
    "docstring": "Returns True if left shape is at least as specific as right shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\control_flow.py",
    "ast_data": "FunctionDef name:_is_subshape arg:left arg:right arguments arg arg If Compare Return return:yes If Compare Return return:yes For Call If BoolOp Compare Compare Return return:yes Return return:yes"
  },
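A pure-Python analogue of `_is_subshape`, with tuples standing in for shape objects and `None` for unknown ranks/dimensions (hypothetical helper, not the TensorFlow API):

```python
def is_subshape(left, right):
    # None stands for an unknown rank or dimension, as in tf.TensorShape.
    if right is None:
        return True                      # fully unknown right side accepts anything
    if left is None or len(left) != len(right):
        return False                     # ranks must match once right is known
    return all(r is None or l == r for l, r in zip(left, right))

print(is_subshape((3, 4), (3, None)))    # True: left is at least as specific
print(is_subshape((3, 4), (5, None)))    # False: a known dimension disagrees
print(is_subshape((3, 4), None))         # True: unknown right side
```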
  {
    "library": "matplotlib",
    "name": "set_label_position",
    "source_code": "def set_label_position(self, position):\n    raise NotImplementedError()",
    "docstring": "Set the label position (top or bottom) Parameters ---------- position : {'top', 'bottom'}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_label_position arg:self arg:position arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "xlog1py",
    "source_code": "@tf_export('math.xlog1py')\n@dispatch.register_binary_elementwise_api\n@dispatch.add_dispatch_support\ndef xlog1py(x, y, name=None):\n    with ops.name_scope(name, 'xlog1py', [x]):\n        return gen_math_ops.xlog1py(x, y)",
    "docstring": "Compute x * log1p(y). Given and , compute . This function safely returns zero when , no matter what the value of is. Example: >>> tf.math.xlog1py(0., 1.) >>> tf.math.xlog1py(1., 1.) >>> tf.math.xlog1py(2., 2.) >>> tf.math.xlog1py(0., -1.) Args: x: A of type , , , , y: A of type , , , , name: A name for the operation (optional). Returns: . @compatibility(scipy) Equivalent to scipy.special.xlog1py @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:xlog1py arg:x arg:y arg:name arguments arg arg arg With Call Return return:yes Call Call"
  },
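Since the docstring notes equivalence with `scipy.special.xlog1py`, here is that call alongside a plain-numpy sketch of the same x == 0 guard (the local `xlog1py` is illustrative only; `errstate` silences the warning from evaluating `log1p(-1)` on the discarded branch):

```python
import numpy as np
from scipy import special

print(special.xlog1py(0.0, -1.0))  # 0.0: the x == 0 guard wins over log1p(-1)
print(special.xlog1py(2.0, 2.0))   # 2 * log(3) ~ 2.1972

def xlog1py(x, y):
    # np.where evaluates both branches, so suppress the spurious warning.
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    with np.errstate(divide="ignore", invalid="ignore"):
        return np.where(x == 0.0, 0.0, x * np.log1p(y))

print(xlog1py(np.array([0.0, 2.0]), np.array([-1.0, 2.0])))  # [0.  2.1972...]
```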
  {
    "library": "tensorflow",
    "name": "as_cluster_def",
    "source_code": "def as_cluster_def(self):\n    return self._cluster_def",
    "docstring": "Returns a protocol buffer based on this cluster.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py",
    "ast_data": "FunctionDef name:as_cluster_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_navigate",
    "source_code": "def get_navigate(self):\n    return self._navigate",
    "docstring": "Get whether the Axes responds to navigation commands.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_navigate arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    self.count = 0",
    "docstring": "Initialize the internal counter.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_raise_error_for_inputs_not_on_cpu",
    "source_code": "def _raise_error_for_inputs_not_on_cpu(self, flat_inputs, flat_paths):\n\n    def check_device(path, device_string):\n        spec = tf_device.DeviceSpec.from_string(device_string)\n        if spec.device_type == 'TPU':\n            raise ValueError(\"Received input tensor {} which is on a TPU input device {}. Input tensors for TPU embeddings must be placed on the CPU. Please ensure that your dataset is prefetching tensors to the host by setting the 'experimental_fetch_to_device' option of the dataset distribution function. See the documentation of the enqueue method for an example.\".format(path, device_string))\n    for input_tensor, input_path in zip(flat_inputs, flat_paths):\n        if nest.is_nested_or_composite(input_tensor):\n            input_tensors = nest.flatten(input_tensor, expand_composites=True)\n        else:\n            input_tensors = [input_tensor]\n        for t in input_tensors:\n            if t.op.type == 'Identity' and t.op.inputs[0].op.type == 'TPUReplicatedInput':\n                for tensor in t.op.inputs[0].op.inputs:\n                    check_device(input_path, tensor.device)\n            else:\n                check_device(input_path, t.device)",
    "docstring": "Checks all tensors in features to see are placed on the CPU.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_raise_error_for_inputs_not_on_cpu arg:self arg:flat_inputs arg:flat_paths arguments arg arg arg FunctionDef name:check_device arg:path arg:device_string arguments arg arg Assign Call If Compare Raise Call Call For Call If Call Assign Call Assign For If BoolOp Compare Compare For Call Call"
  },
  {
    "library": "pytorch",
    "name": "QuantizeHandler",
    "source_code": "class QuantizeHandler(ABC):\n\n    def __init__(self, node_pattern: NodePattern, modules: dict[str, torch.nn.Module], root_node_getter: Optional[Callable]=None, is_custom_module=False, is_standalone_module=False):\n        self.node_pattern = node_pattern\n        self.modules = modules\n        if root_node_getter is None:\n            root_node_getter = _default_root_node_getter\n        self.root_node = root_node_getter(node_pattern)\n        self.is_custom_module_ = is_custom_module\n        self.is_standalone_module_ = is_standalone_module\n        self.num_tensor_args = 0\n        if isinstance(self.root_node, Node):\n            cache_for_no_tensor_check: dict[Node, bool] = {}\n            for arg_idx in range(len(self.root_node.args)):\n                arg = self.root_node.args[arg_idx]\n                if isinstance(arg, Node) and (not all_node_args_have_no_tensors(arg, self.modules, cache_for_no_tensor_check)):\n                    self.num_tensor_args += 1\n\n    def is_general_tensor_value_op(self) -> bool:\n        return False\n\n    def is_custom_module(self):\n        return self.is_custom_module_\n\n    def is_standalone_module(self):\n        return self.is_standalone_module_",
    "docstring": "Base handler class for the quantizer patterns",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py",
    "ast_data": "ClassDef name:QuantizeHandler FunctionDef name:__init__ arg:self arg:node_pattern arg:modules arg:root_node_getter arg:is_custom_module arg:is_standalone_module arguments arg arg arg arg arg arg Assign Assign If Compare Assign Assign Call Assign Assign Assign If Call For Call Call Assign If BoolOp Call Call FunctionDef name:is_general_tensor_value_op arg:self arguments arg Return return:yes FunctionDef name:is_custom_module arg:self arguments arg Return return:yes FunctionDef name:is_standalone_module arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_rng_state_all",
    "source_code": "def set_rng_state_all(new_states: Iterable[Tensor]) -> None:\n    for i, state in enumerate(new_states):\n        set_rng_state(state, i)",
    "docstring": "Set the random number generator state of all devices. Args: new_states (Iterable of torch.ByteTensor): The desired state for each device.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\random.py",
    "ast_data": "FunctionDef name:set_rng_state_all arg:new_states arguments arg For Call Call"
  },
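A round-trip sketch for the per-device RNG state functions, guarded so it is a no-op without CUDA; restoring the captured states reproduces the same random draws:

```python
import torch

if torch.cuda.is_available():
    states = torch.cuda.get_rng_state_all()  # one state tensor per device
    a = torch.rand(3, device="cuda")
    torch.cuda.set_rng_state_all(states)     # rewind every device's generator
    b = torch.rand(3, device="cuda")
    assert torch.equal(a, b)                 # identical draws after restore
```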
  {
    "library": "pygame",
    "name": "get_top_layer",
    "source_code": "def get_top_layer(self):\n    return self._spritelayers[self._spritelist[-1]]",
    "docstring": "return the top layer LayeredUpdates.get_top_layer(): return layer",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:get_top_layer arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "schedule_collective_for_overlap",
    "source_code": "def schedule_collective_for_overlap(snode):\n    assert contains_collective(snode)\n    schedule(snode)\n    collective_cost = snode_to_cost[snode]\n    while collective_cost > 0 and (candidate := get_overlapping_candidate()) is not None:\n        ready.remove(candidate)\n        schedule(candidate.snode)\n        collective_cost -= snode_to_cost[candidate.snode]\n    heapq.heapify(ready)",
    "docstring": "Schedules collective node , along with one or more compute nodes to overlap with it. The strategy is described in the comment of .",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:schedule_collective_for_overlap arg:snode arguments arg Call Call Assign While BoolOp Compare Compare Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "desc",
    "source_code": "class desc(nodes.Admonition, nodes.Element):\n    pass",
    "docstring": "Node for a list of object signatures and a common description of them. Contains one or more :py:class: nodes and then a single :py:class: node. This node always has two classes: - The name of the domain it belongs to, e.g., ``.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc"
  },
  {
    "library": "tensorflow",
    "name": "add_logs",
    "source_code": "def add_logs(self, logs):\n    self._log.extend(logs)\n    for log in logs:\n        print('%s line %d:%d: %s' % log)",
    "docstring": "Record a log and print it. The log should be a tuple , which will be printed and recorded. It is part of the log available in the property. Args: logs: The logs to add. Must be a list of tuples .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:add_logs arg:self arg:logs arguments arg arg Call For Call"
  },
  {
    "library": "scipy",
    "name": "logsf",
    "source_code": "def logsf(self, k, *args, **kwds):\n    args, loc, _ = self._parse_args(*args, **kwds)\n    k, loc = map(asarray, (k, loc))\n    args = tuple(map(asarray, args))\n    _a, _b = self._get_support(*args)\n    k = asarray(k - loc)\n    cond0 = self._argcheck(*args)\n    cond1 = (k >= _a) & (k < _b)\n    cond2 = (k < _a) & cond0\n    cond = cond0 & cond1\n    output = empty(shape(cond), 'd')\n    output.fill(-inf)\n    place(output, 1 - cond0 + np.isnan(k), self.badvalue)\n    place(output, cond2, 0.0)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(k,) + args)\n        place(output, cond, self._logsf(*goodargs))\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Log of the survival function of the given RV. Returns the log of the \"survival function,\" defined as 1 - , evaluated at . Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logsf : ndarray Log of the survival function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:logsf arg:self arg:k arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Compare Compare Assign Compare Assign Assign Call Call Call Call Call Call If Call Assign Call Call Call If Compare Return return:yes Return return:yes"
  },
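A usage sketch via the public frozen-distribution API: `logsf` agrees with `log(sf)` where the latter does not underflow, and stays finite much deeper into the tail:

```python
import numpy as np
from scipy import stats

print(stats.poisson.logsf(3, mu=2))       # log(1 - cdf(3))
print(np.log(stats.poisson.sf(3, mu=2)))  # same value, via plain sf
print(stats.norm.logsf(40))               # ~ -804.6, while norm.sf(40) underflows to 0.0
```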
  {
    "library": "numpy",
    "name": "recfromcsv",
    "source_code": "def recfromcsv(fname, **kwargs):\n    warnings.warn('`recfromcsv` is deprecated, use `numpy.genfromtxt` with comma as `delimiter` instead. (deprecated in NumPy 2.0)', DeprecationWarning, stacklevel=2)\n    kwargs.setdefault('case_sensitive', 'lower')\n    kwargs.setdefault('names', True)\n    kwargs.setdefault('delimiter', ',')\n    kwargs.setdefault('dtype', None)\n    output = genfromtxt(fname, **kwargs)\n    usemask = kwargs.get('usemask', False)\n    if usemask:\n        from numpy.ma.mrecords import MaskedRecords\n        output = output.view(MaskedRecords)\n    else:\n        output = output.view(np.recarray)\n    return output",
    "docstring": "Load ASCII data stored in a comma-separated file. The returned array is a record array (if `recarrayma.mrecords.MaskedRecordsnumpy.genfromtxtdelimitergenfromtxtdtype` is None, which means that the data-type of the output array will be determined from the data.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:recfromcsv arg:fname arguments arg arg Call Call Call Call Call Assign Call Assign Call If Assign Call Assign Call Return return:yes"
  },
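The deprecation message points to `numpy.genfromtxt`; a minimal equivalent of the old defaults (comma delimiter, lower-cased field names, inferred dtype), as a sketch:

```python
import numpy as np
from io import StringIO

data = StringIO("A,B\n1,2.5\n3,4.0")
arr = np.genfromtxt(data, delimiter=",", names=True, dtype=None,
                    case_sensitive="lower")
print(arr["a"], arr["b"])  # [1 3] [2.5 4. ]
```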
  {
    "library": "django",
    "name": "get_login_url",
    "source_code": "def get_login_url(self):\n    login_url = self.login_url or settings.LOGIN_URL\n    if not login_url:\n        raise ImproperlyConfigured(f'{self.__class__.__name__} is missing the login_url attribute. Define {self.__class__.__name__}.login_url, settings.LOGIN_URL, or override {self.__class__.__name__}.get_login_url().')\n    return str(login_url)",
    "docstring": "Override this method to override the login_url attribute.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "FunctionDef name:get_login_url arg:self arguments arg Assign BoolOp If Raise Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "from_curl",
    "source_code": "@classmethod\ndef from_curl(cls, curl_command: str, ignore_unknown_options: bool=True, **kwargs: Any) -> Self:\n    request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)\n    request_kwargs.update(kwargs)\n    return cls(**request_kwargs)",
    "docstring": "Create a Request object from a string containing a _ command. It populates the HTTP method, the URL, the headers, the cookies and the body. It accepts the same arguments as the :class: class, taking preference and overriding the values of the same arguments contained in the cURL command. Unrecognized options are ignored by default. To raise an error when finding unknown options call this method by passing `from_curl~scrapy.Request~scrapy.http.JsonRequest~scrapy.http.XmlRpcRequestdownloader middlewares spider middlewares ~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware~scrapy.Requestcurl2scrapy `_.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\http\\request\\__init__.py",
    "ast_data": "FunctionDef name:from_curl arg:cls arg:curl_command arg:ignore_unknown_options arguments arg arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Shubert04",
    "source_code": "class Shubert04(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[-0.80032121, -7.08350592]]\n        self.fglob = -29.016015\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        j = atleast_2d(arange(1, 6)).T\n        y = -j * cos((j + 1) * x + j)\n        return sum(sum(y))",
    "docstring": "Shubert 4 objective function. This class defines the Shubert 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Shubert04}}(x) = \\left(\\sum_{i=1}^n \\sum_{j=1}^5 -j \\cos ((j+1)x_i + j)\\right) Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: (and many others). .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Jamil#135 has wrong global minimum value, and is missing a minus sign before the whole thing.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Shubert04 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Assign Call Return return:yes Call Call"
  },
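A standalone check of the objective against the recorded optimum, reusing the exact formula from `fun` above (`shubert04` is just a local name for the same computation):

```python
import numpy as np

def shubert04(x):
    # Same formula as `fun`: sum_i sum_{j=1..5} -j * cos((j+1)*x_i + j)
    j = np.atleast_2d(np.arange(1, 6)).T
    y = -j * np.cos((j + 1) * x + j)
    return np.sum(np.sum(y))

x_opt = np.array([-0.80032121, -7.08350592])
print(shubert04(x_opt))  # ~ -29.016015, matching fglob above
```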
  {
    "library": "pytorch",
    "name": "constant_to_device",
    "source_code": "def constant_to_device(self, device: torch.device) -> IRNode:\n    loader = self.make_loader()\n    loader = patch.object(ConstantBuffer, 'override_device', device)(loader)\n    return Reduction(device=device, dtype=self.dtype, inner_fn=loader, ranges=self.ranges, reduction_ranges=self.reduction_ranges, reduction_type=self.reduction_type, src_dtype=self.src_dtype, reduction_hint=ReductionHint.DEFAULT)",
    "docstring": "Move this to a given device. Requires that all reads are to constants.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:constant_to_device arg:self arg:device arguments arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_min_matrix_dim_tensor",
    "source_code": "def _min_matrix_dim_tensor(self):\n    return math_ops.reduce_min(self.shape_tensor()[-2:])",
    "docstring": "Minimum of domain/range dimension, as a tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_identity.py",
    "ast_data": "FunctionDef name:_min_matrix_dim_tensor arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_AddAndReturnDiag",
    "source_code": "class _AddAndReturnDiag(_Adder):\n\n    def can_add(self, op1, op2):\n        types = {_type(op1), _type(op2)}\n        return not types.difference(_DIAG_LIKE)\n\n    def _add(self, op1, op2, operator_name, hints):\n        return linear_operator_diag.LinearOperatorDiag(diag=op1.diag_part() + op2.diag_part(), is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name)",
    "docstring": "Handles additions resulting in a Diag operator.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "ClassDef name:_AddAndReturnDiag FunctionDef name:can_add arg:self arg:op1 arg:op2 arguments arg arg arg Assign Call Call Return return:yes Call FunctionDef name:_add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "min_fitting_element",
    "source_code": "def min_fitting_element(start: int, step: int, lower_limit: int) -> int:\n    no_steps = -(-(lower_limit - start) // abs(step))\n    return start + abs(step) * no_steps",
    "docstring": "Returns the smallest element greater than or equal to the limit",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:min_fitting_element arg:start arg:step arg:lower_limit arguments arg arg arg Assign Call Return return:yes Call"
  },
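The helper relies on `-(-a // b)` being ceiling division; a quick standalone check of the same arithmetic:

```python
def min_fitting_element(start, step, lower_limit):
    # -(-x // d) is ceiling division; abs(step) makes it sign-agnostic.
    no_steps = -(-(lower_limit - start) // abs(step))
    return start + abs(step) * no_steps

print(min_fitting_element(0, 3, 7))    # 9: smallest of 0, 3, 6, 9, ... that is >= 7
print(min_fitting_element(10, -3, 7))  # 7: smallest of ..., 4, 7, 10 that is >= 7
```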
  {
    "library": "tensorflow",
    "name": "_is_running_on_cpu",
    "source_code": "def _is_running_on_cpu():\n    return tpu_function.get_tpu_context().number_of_shards is None",
    "docstring": "Returns True if the current context is CPU model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:_is_running_on_cpu arguments Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "ask_not_null_addition",
    "source_code": "def ask_not_null_addition(self, field_name, model_name):\n    if not self.dry_run:\n        choice = self._choice_input(f\"It is impossible to add a non-nullable field '{field_name}' to {model_name} without specifying a default. This is because the database needs something to populate existing rows.\\nPlease select a fix:\", ['Provide a one-off default now (will be set on all existing rows with a null value for this column)', 'Quit and manually define a default value in models.py.'])\n        if choice == 2:\n            sys.exit(3)\n        else:\n            return self._ask_default()\n    return None",
    "docstring": "Adding a NOT NULL field to a model.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\questioner.py",
    "ast_data": "FunctionDef name:ask_not_null_addition arg:self arg:field_name arg:model_name arguments arg arg arg If Assign Call If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "pandas",
    "name": "_maybe_cast_listlike_indexer",
    "source_code": "def _maybe_cast_listlike_indexer(self, target) -> Index:\n    target_index = ensure_index(target)\n    if not hasattr(target, 'dtype') and self.dtype == object and (target_index.dtype == 'string'):\n        target_index = Index(target, dtype=self.dtype)\n    return target_index",
    "docstring": "Analogue to maybe_cast_indexer for get_indexer instead of get_loc.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_maybe_cast_listlike_indexer arg:self arg:target arguments arg arg Assign Call If BoolOp Call Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "dtype_is_implied",
    "source_code": "def dtype_is_implied(dtype):\n    dtype = np.dtype(dtype)\n    if format_options.get()['legacy'] <= 113 and dtype.type == np.bool:\n        return False\n    if dtype.names is not None:\n        return False\n    if not dtype.isnative:\n        return False\n    return dtype.type in _typelessdata",
    "docstring": "Determine if the given dtype is implied by the representation of its values. Parameters ---------- dtype : dtype Data type Returns ------- implied : bool True if the dtype is implied by the representation of its values. Examples -------- >>> import numpy as np >>> np._core.arrayprint.dtype_is_implied(int) True >>> np.array([1, 2, 3], int) array([1, 2, 3]) >>> np._core.arrayprint.dtype_is_implied(np.int8) False >>> np.array([1, 2, 3], np.int8) array([1, 2, 3], dtype=int8)",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\arrayprint.py",
    "ast_data": "FunctionDef name:dtype_is_implied arg:dtype arguments arg Assign Call If BoolOp Compare Call Compare Return return:yes If Compare Return return:yes If Return return:yes Return return:yes Compare"
  },
  {
    "library": "pandas",
    "name": "box_index",
    "source_code": "@box(IndexType)\ndef box_index(typ, val, c):\n    index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)\n    res = cgutils.alloca_once_value(c.builder, index.parent)\n    with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as (has_parent, otherwise):\n        with has_parent:\n            c.pyapi.incref(index.parent)\n        with otherwise:\n            class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index))\n            array_obj = c.box(typ.as_array, index.data)\n            if isinstance(typ.dtype, types.UnicodeCharSeq):\n                object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object('object'))\n                array_obj = c.pyapi.call_method(array_obj, 'astype', (object_str_obj,))\n                c.pyapi.decref(object_str_obj)\n            index_obj = c.pyapi.call_method(class_obj, '_simple_new', (array_obj,))\n            index.parent = index_obj\n            c.builder.store(index_obj, res)\n            c.pyapi.decref(class_obj)\n            c.pyapi.decref(array_obj)\n    return c.builder.load(res)",
    "docstring": "Convert a native index structure to a Index object. If our native index is of a numpy string dtype, we'll cast it to object.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "FunctionDef name:box_index arg:typ arg:val arg:c arguments arg arg arg Assign Call Call Assign Call With Call Call With Call With Assign Call Call Assign Call If Call Assign Call Call Assign Call Call Assign Call Assign Call Call Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    tracebacks = cherrypy.request.show_tracebacks\n    if tracebacks:\n        trace = 'off'\n    else:\n        trace = 'on'\n    return '\\n        <html><body>\\n            <p>Toggle tracebacks <a href=\"toggleTracebacks\">%s</a></p>\\n            <p><a href=\"/doesNotExist\">Click me; I\\'m a broken link!</a></p>\\n            <p>\\n              <a href=\"/error?code=403\">\\n                Use a custom error page from a file.\\n              </a>\\n            </p>\\n            <p>These errors are explicitly raised by the application:</p>\\n            <ul>\\n                <li><a href=\"/error?code=400\">400</a></li>\\n                <li><a href=\"/error?code=401\">401</a></li>\\n                <li><a href=\"/error?code=402\">402</a></li>\\n                <li><a href=\"/error?code=500\">500</a></li>\\n            </ul>\\n            <p><a href=\"/messageArg\">You can also set the response body\\n            when you raise an error.</a></p>\\n        </body></html>\\n        ' % trace",
    "docstring": "Produce HTTP response body of error display app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut10_http_errors.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Assign If Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n    self._validate_kwargs(kwargs, support_partition=False)\n    dtype = _assert_float_dtype(dtype)\n    if len(shape) < 2:\n        raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')\n    num_rows = 1\n    for dim in shape[:-1]:\n        num_rows *= dim\n    num_cols = shape[-1]\n    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))\n    a = self._random_generator.random_normal(flat_shape, dtype=dtype)\n    q, r = gen_linalg_ops.qr(a, full_matrices=False)\n    d = array_ops.diag_part(r)\n    q *= math_ops.sign(d)\n    if num_rows < num_cols:\n        q = array_ops.matrix_transpose(q)\n    return self.gain * array_ops.reshape(q, shape)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point or the input shape is not valid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If Compare Call Raise Call Assign For Assign Assign Call Call Assign Call Assign Call Assign Call Call If Compare Assign Call Return return:yes Call"
  },
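A numpy sketch of the same QR-based orthogonal initialization (`orthogonal` is a hypothetical helper; TensorFlow's version draws from its own stateless generator and validates dtypes):

```python
import numpy as np

def orthogonal(shape, gain=1.0, rng=None):
    # Mirror the steps above: draw a Gaussian, orthonormalize via QR,
    # fix signs with diag(R) to make the decomposition unique, transpose
    # when there are fewer rows than columns.
    rng = np.random.default_rng() if rng is None else rng
    num_rows = int(np.prod(shape[:-1]))
    num_cols = shape[-1]
    flat = (max(num_cols, num_rows), min(num_cols, num_rows))
    a = rng.standard_normal(flat)
    q, r = np.linalg.qr(a)
    q *= np.sign(np.diag(r))          # flip column signs deterministically
    if num_rows < num_cols:
        q = q.T
    return gain * q.reshape(shape)

w = orthogonal((4, 4))
print(np.allclose(w.T @ w, np.eye(4)))  # True: columns are orthonormal
```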
  {
    "library": "tensorflow",
    "name": "pad_sparse_embedding_lookup_indices",
    "source_code": "def pad_sparse_embedding_lookup_indices(sparse_indices, padded_size):\n    batch_size = sparse_indices.dense_shape[0]\n    sparse_indices = sparse_ops.sparse_slice(sparse_indices, [0, 0], [batch_size, padded_size])\n    indices, values = (sparse_indices.indices, sparse_indices.values)\n    padded_values = array_ops.scatter_nd(indices, math_ops.cast(values, dtypes.int32), shape=(batch_size, padded_size))\n    weights = array_ops.ones_like(values, dtype=dtypes.float32)\n    padded_mask = array_ops.scatter_nd(indices, weights, shape=(batch_size, padded_size))\n    return (padded_values, padded_mask)",
    "docstring": "Creates statically-sized Tensors containing indices and weights. From third_party/cloud_tpu/models/movielens/tpu_embedding.py Also computes sparse_indices.values % embedding_table_size, for equivalent functionality to sparse_column_with_integerized_feature. The returned padded weight Tensor also doubles as a mask indicating which values in the returned padded indices Tensor are indices versus padded zeros. Args: sparse_indices: SparseTensor of embedding lookup indices. padded_size: Number of columns of the returned Tensors. Indices which fall out of bounds will be truncated to the padded size. Returns: (sparse_indices.values padded to the specified size, a mask the same size as the returned padded values in which 0s indicate padded locations and 1s (or values from sparse_weights) indicate actual values)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py",
    "ast_data": "FunctionDef name:pad_sparse_embedding_lookup_indices arg:sparse_indices arg:padded_size arguments arg arg Assign Assign Call Assign Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_signature_to_agg_fn_map",
    "source_code": "def get_signature_to_agg_fn_map(self):\n    return {TRACE_MODE_NORM: linalg_ops.norm, TRACE_MODE_HISTORY: math_ops.reduce_max, TRACE_MODE_MAX_ABS: math_ops.reduce_max, TRACE_MODE_NAN_INF: math_ops.reduce_max, TT_SUMMARY_NORM: linalg_ops.norm, TT_SUMMARY_MAX: math_ops.reduce_max, TT_SUMMARY_MAX_ABS: lambda t, axis=0: math_ops.reduce_max(math_ops.abs(t), axis=axis), TT_SUMMARY_MIN: math_ops.reduce_min, TT_SUMMARY_SPARSITY: math_ops.reduce_mean, TT_SUMMARY_MEAN: math_ops.reduce_mean, TT_SUMMARY_VAR: math_ops.reduce_max, TT_SUMMARY_SIZE: math_ops.reduce_sum}",
    "docstring": "Returns a map that contains the aggregate function for each signature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:get_signature_to_agg_fn_map arg:self arguments arg Return return:yes arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    with c_api_util.tf_buffer() as buffer_:\n        pywrap_tfe.TFE_MonitoringStringGaugeCellValue(self._cell, buffer_)\n        value = pywrap_tf_session.TF_GetBuffer(buffer_).decode('utf-8')\n    return value",
    "docstring": "Retrieves the current value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg With Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cluster_resolver",
    "source_code": "@property\ndef cluster_resolver(self):\n    if hasattr(self.extended, '_cluster_resolver'):\n        return self.extended._cluster_resolver\n    return None",
    "docstring": "Returns the cluster resolver associated with this strategy. In general, when using a multi-worker strategy such as or , there is a associated with the strategy used, and such an instance is returned by this property. Strategies that intend to have an associated must set the relevant attribute, or override this property; otherwise, is returned by default. Those strategies should also provide information regarding what is returned by this property. Single-worker strategies usually do not have a , and in those cases this property will return . The may be useful when the user needs to access information such as the cluster spec, task type or task id. For example, For more information, please see 's API docstring. Returns: The cluster resolver associated with this strategy. Returns if a cluster resolver is not applicable or available in this strategy.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:cluster_resolver arg:self arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "uvicorn",
    "name": "pause_writing",
    "source_code": "def pause_writing(self) -> None:\n    self.flow.pause_writing()",
    "docstring": "Called by the transport when the write buffer exceeds the high water mark.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\http\\h11_impl.py",
    "ast_data": "FunctionDef name:pause_writing arg:self arguments arg Call"
  },
  {
    "library": "sphinx",
    "name": "ASTParenAttribute",
    "source_code": "class ASTParenAttribute(ASTAttribute):\n\n    def __init__(self, id: str, arg: str) -> None:\n        self.id = id\n        self.arg = arg\n\n    def __eq__(self, other: object) -> bool:\n        if not isinstance(other, ASTParenAttribute):\n            return NotImplemented\n        return self.id == other.id and self.arg == other.arg\n\n    def __hash__(self) -> int:\n        return hash((self.id, self.arg))\n\n    def _stringify(self, transform: StringifyTransform) -> str:\n        return f'{self.id}({self.arg})'\n\n    def describe_signature(self, signode: TextElement) -> None:\n        signode.append(nodes.Text(str(self)))",
    "docstring": "For paren attributes defined by the user.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\cfamily.py",
    "ast_data": "ClassDef name:ASTParenAttribute FunctionDef name:__init__ arg:self arg:id arg:arg arguments arg arg arg Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes Return return:yes BoolOp Compare Compare FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:_stringify arg:self arg:transform arguments arg arg Return return:yes FunctionDef name:describe_signature arg:self arg:signode arguments arg arg Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_lars_path_residues",
    "source_code": "def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None, copy=True, method='lar', verbose=False, fit_intercept=True, max_iter=500, eps=np.finfo(float).eps, positive=False):\n    X_train = _check_copy_and_writeable(X_train, copy)\n    y_train = _check_copy_and_writeable(y_train, copy)\n    X_test = _check_copy_and_writeable(X_test, copy)\n    y_test = _check_copy_and_writeable(y_test, copy)\n    if fit_intercept:\n        X_mean = X_train.mean(axis=0)\n        X_train -= X_mean\n        X_test -= X_mean\n        y_mean = y_train.mean(axis=0)\n        y_train = as_float_array(y_train, copy=False)\n        y_train -= y_mean\n        y_test = as_float_array(y_test, copy=False)\n        y_test -= y_mean\n    alphas, active, coefs = lars_path(X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False, method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps, positive=positive)\n    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]\n    return (alphas, active, coefs, residues.T)",
    "docstring": "Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array-like of shape (n_samples, n_features) The data to fit the LARS on y_train : array-like of shape (n_samples,) The target variable to fit LARS on X_test : array-like of shape (n_samples, n_features) The data to compute the residues on y_test : array-like of shape (n_samples,) The target variable to compute the residues on Gram : None, 'auto' or array-like of shape (n_features, n_features), default=None Precomputed Gram matrix (X' * X), if ``, whichever is smaller. active : list Indices of active variables at the end of the path. coefs : array-like of shape (n_features, n_alphas) Coefficients along the path residues : array-like of shape (n_alphas, n_samples) Residues of the prediction on the test data",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_least_angle.py",
    "ast_data": "FunctionDef name:_lars_path_residues arg:X_train arg:y_train arg:X_test arg:y_test arg:Gram arg:copy arg:method arg:verbose arg:fit_intercept arg:max_iter arg:eps arg:positive arguments arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Call Assign Call Assign Call Assign Call If Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Return return:yes"
  },
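The private helper centers the data and delegates to the public `lars_path`; a small usage sketch of that entry point on synthetic data:

```python
import numpy as np
from sklearn.linear_model import lars_path

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 5))
y = X @ np.array([1.0, 0.0, -2.0, 0.0, 0.5]) + 0.01 * rng.standard_normal(50)

alphas, active, coefs = lars_path(X, y, method="lasso")
print(active)       # order in which features enter the path
print(coefs.shape)  # (n_features, n_alphas)
```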
  {
    "library": "pandas",
    "name": "prepare_function_arguments",
    "source_code": "def prepare_function_arguments(func: Callable, args: tuple, kwargs: dict, *, num_required_args: int) -> tuple[tuple, dict]:\n    if not kwargs:\n        return (args, kwargs)\n    signature = inspect.signature(func)\n    arguments = signature.bind(*[_sentinel] * num_required_args, *args, **kwargs)\n    arguments.apply_defaults()\n    args = arguments.args\n    kwargs = arguments.kwargs\n    if kwargs:\n        raise NumbaUtilError('numba does not support keyword-only argumentshttps://github.com/numba/numba/issues/2916, https://github.com/numba/numba/issues/6846')\n    args = args[num_required_args:]\n    return (args, kwargs)",
    "docstring": "Prepare arguments for jitted function. As numba functions do not support kwargs, we try to move kwargs into args if possible. Parameters ---------- func : function User defined function args : tuple User input positional arguments kwargs : dict User input keyword arguments num_required_args : int The number of leading positional arguments we will pass to udf. These are not supplied by the user. e.g. for groupby we require \"values\", \"index\" as the first two arguments: , in this case num_required_args=2. See :func: Returns ------- tuple[tuple, dict] args, kwargs",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\util\\numba_.py",
    "ast_data": "FunctionDef name:prepare_function_arguments arg:func arg:args arg:kwargs arguments arg arg arg arg If Return return:yes Assign Call Assign Call Call Assign Assign If Raise Call Assign Return return:yes"
  },
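A self-contained demonstration of the binding trick `prepare_function_arguments` uses: placeholder sentinels occupy the engine-supplied leading slots so `inspect.signature` can fold user kwargs into positional args (`shift_kwargs_to_args` and `udf` are hypothetical stand-ins):

```python
import inspect

_sentinel = object()

def shift_kwargs_to_args(func, args, kwargs, num_required_args):
    # Bind placeholders for the engine-supplied leading arguments, then let
    # inspect order the user's kwargs into positional slots.
    sig = inspect.signature(func)
    bound = sig.bind(*[_sentinel] * num_required_args, *args, **kwargs)
    bound.apply_defaults()
    if bound.kwargs:  # anything left here is keyword-only
        raise TypeError("keyword-only arguments are not supported")
    return bound.args[num_required_args:], bound.kwargs

def udf(values, index, a, b=1):
    ...

# b arrives as a keyword but comes back positionally:
print(shift_kwargs_to_args(udf, (2,), {"b": 3}, num_required_args=2))
# ((2, 3), {})
```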
  {
    "library": "scipy",
    "name": "_get_message",
    "source_code": "def _get_message(status):\n    messages = ['Optimization terminated successfully.', 'The iteration limit was reached before the algorithm converged.', 'The algorithm terminated successfully and determined that the problem is infeasible.', 'The algorithm terminated successfully and determined that the problem is unbounded.', 'Numerical difficulties were encountered before the problem converged. Please check your problem formulation for errors, independence of linear equality constraints, and reasonable scaling and matrix condition numbers. If you continue to encounter this error, please submit a bug report.']\n    return messages[status]",
    "docstring": "Given problem status code, return a more detailed message. Parameters ---------- status : int An integer representing the exit status of the optimization:: 0 : Optimization terminated successfully 1 : Iteration limit reached 2 : Problem appears to be infeasible 3 : Problem appears to be unbounded 4 : Serious numerical difficulties encountered Returns ------- message : str A string descriptor of the exit status of the optimization.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_ip.py",
    "ast_data": "FunctionDef name:_get_message arg:status arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_NoParamDecoratorContextManager",
    "source_code": "class _NoParamDecoratorContextManager(_DecoratorContextManager):\n\n    def __new__(cls, orig_func=None):\n        if orig_func is None:\n            return super().__new__(cls)\n        return cls()(orig_func)",
    "docstring": "Allow a context manager to be used as a decorator without parentheses.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\_contextlib.py",
    "ast_data": "ClassDef name:_NoParamDecoratorContextManager FunctionDef name:__new__ arg:cls arg:orig_func arguments arg arg If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_extra_descriptor_filter",
    "source_code": "def get_extra_descriptor_filter(self, instance):\n    return {}",
    "docstring": "Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:get_extra_descriptor_filter arg:self arg:instance arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "register_custom_device",
    "source_code": "def register_custom_device(device_capsule, device_name, device_info_capsule):\n    context().register_custom_device(device_capsule, device_name, device_info_capsule)",
    "docstring": "Calls TFE_RegisterCustomDevice to register a custom device with Python. Enables using C extensions specifying a custom device from Python. See the experimental eager C API in tensorflow/c/eager/c_api_experimental.h for details. Note that custom devices are not currently supported inside s. Args: device_capsule: A PyCapsule with the name set to 'TFE_CustomDevice' containing a pointer to a TFE_CustomDevice struct. The capsule retains ownership of the memory. device_name: A string indicating the name to register the custom device under, e.g. '/job:localhost/replica:0/task:0/device:CUSTOM:0'. It may subsequently be passed to . device_info_capsule: A PyCapsule with the name set to 'TFE_CustomDevice_DeviceInfo' containing a pointer to a device-specific struct with the initial state of the custom device (the void* device_info argument to TFE_RegisterCustomDevice). This method takes ownership of the memory and clears the capsule destructor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:register_custom_device arg:device_capsule arg:device_name arg:device_info_capsule arguments arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "isbetter",
    "source_code": "def isbetter(f1: float, c1: float, f2: float, c2: float, ctol: float) -> bool:\n    if DEBUGGING:\n        assert not any(np.isnan([f1, c1]) | np.isposinf([f2, c2]))\n        assert not any(np.isnan([f2, c2]) | np.isposinf([f2, c2]))\n        assert c1 >= 0 and c2 >= 0\n        assert ctol >= 0\n    is_better = False\n    is_better = is_better or (any(np.isnan([f1, c1]) | np.isposinf([f1, c1])) and (not any(np.isnan([f2, c2]) | np.isposinf([f2, c2]))))\n    is_better = is_better or (f1 < f2 and c1 <= c2)\n    is_better = is_better or (f1 <= f2 and c1 < c2)\n    cref = 10 * max(EPS, min(ctol, 0.01 * CONSTRMAX))\n    is_better = is_better or (f1 < REALMAX and c1 <= ctol and (c2 > max(ctol, cref) or np.isnan(c2)))\n    if DEBUGGING:\n        assert not (is_better and f1 >= f2 and (c1 >= c2))\n        assert is_better or not (f1 <= f2 and c1 < c2)\n        assert is_better or not (f1 < f2 and c1 <= c2)\n    return is_better",
    "docstring": "This function compares whether FC1 = (F1, C1) is (strictly) better than FC2 = (F2, C2), which basically means that (F1 < F2 and C1 <= C2) or (F1 <= F2 and C1 < C2). It takes care of the cases where some of these values are NaN or Inf, even though some cases should never happen due to the moderated extreme barrier. At return, BETTER = TRUE if and only if (F1, C1) is better than (F2, C2). Here, C means constraint violation, which is a nonnegative number.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\selectx.py",
    "ast_data": "FunctionDef name:isbetter arg:f1 arg:c1 arg:f2 arg:c2 arg:ctol arguments arg arg arg arg arg If Call Call Call Call Call Call BoolOp Compare Compare Compare Assign Assign BoolOp BoolOp Call Call Call Call Call Call Assign BoolOp BoolOp Compare Compare Assign BoolOp BoolOp Compare Compare Assign Call Call Assign BoolOp BoolOp Compare Compare BoolOp Compare Call Call If BoolOp Compare Compare BoolOp BoolOp Compare Compare BoolOp BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "resolve_xref",
    "source_code": "def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder, typ: str, target: str, node: pending_xref, contnode: Element) -> nodes.reference | None:\n    pass",
    "docstring": "Resolve the pending_xref *node* with the given *typ* and *target*. This method should return a new node, to replace the xref node, containing the *contnode* which is the markup content of the cross-reference. If no resolution can be found, None can be returned; the xref node will then given to the :event: event, and if that yields no resolution, replaced by *contnode*. The method can also raise :exc: to suppress the :event: event being emitted.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "FunctionDef name:resolve_xref arg:self arg:env arg:fromdocname arg:builder arg:typ arg:target arg:node arg:contnode arguments arg arg arg arg arg arg arg arg"
  },
  {
    "library": "numpy",
    "name": "NotFoundError",
    "source_code": "class NotFoundError(DistutilsError):\n    pass",
    "docstring": "Some third-party program or library is not found.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:NotFoundError"
  },
  {
    "library": "pandas",
    "name": "_coerce_indexer_frozen",
    "source_code": "def _coerce_indexer_frozen(array_like, categories, copy: bool=False) -> np.ndarray:\n    array_like = coerce_indexer_dtype(array_like, categories)\n    if copy:\n        array_like = array_like.copy()\n    array_like.flags.writeable = False\n    return array_like",
    "docstring": "Coerce the array-like indexer to the smallest integer dtype that can encode all of the given categories. Parameters ---------- array_like : array-like categories : array-like copy : bool Returns ------- np.ndarray Non-writeable.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_coerce_indexer_frozen arg:array_like arg:categories arg:copy arguments arg arg arg Assign Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_canonical_name_git_grep",
    "source_code": "def get_canonical_name_git_grep(filename):\n    return re.sub('\\\\.pyx(\\\\.tp)?', '', filename)",
    "docstring": "Return name based on filename. The goal is to return a name that can easily be matched with the output from .",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py",
    "ast_data": "FunctionDef name:get_canonical_name_git_grep arg:filename arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "set_field_name",
    "source_code": "def set_field_name(self):\n    self.field_name = None",
    "docstring": "Set the related field's name, this is not available until later stages of app loading, so set_field_name is called from set_attributes_from_rel()",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "FunctionDef name:set_field_name arg:self arguments arg Assign"
  },
  {
    "library": "scipy",
    "name": "_read_body_coo",
    "source_code": "def _read_body_coo(cursor, generalize_symmetry=True):\n    from . import _fmm_core\n    index_dtype = 'int32'\n    if cursor.header.nrows >= 2 ** 31 or cursor.header.ncols >= 2 ** 31:\n        index_dtype = 'int64'\n    i = np.zeros(cursor.header.nnz, dtype=index_dtype)\n    j = np.zeros(cursor.header.nnz, dtype=index_dtype)\n    data = np.zeros(cursor.header.nnz, dtype=_field_to_dtype.get(cursor.header.field))\n    _fmm_core.read_body_coo(cursor, i, j, data)\n    if generalize_symmetry and cursor.header.symmetry != 'general':\n        off_diagonal_mask = i != j\n        off_diagonal_rows = i[off_diagonal_mask]\n        off_diagonal_cols = j[off_diagonal_mask]\n        off_diagonal_data = data[off_diagonal_mask]\n        if cursor.header.symmetry == 'skew-symmetric':\n            off_diagonal_data *= -1\n        elif cursor.header.symmetry == 'hermitian':\n            off_diagonal_data = off_diagonal_data.conjugate()\n        i = np.concatenate((i, off_diagonal_cols))\n        j = np.concatenate((j, off_diagonal_rows))\n        data = np.concatenate((data, off_diagonal_data))\n    return ((data, (i, j)), cursor.header.shape)",
    "docstring": "Read MatrixMarket coordinate body",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py",
    "ast_data": "FunctionDef name:_read_body_coo arg:cursor arg:generalize_symmetry arguments arg arg Assign If BoolOp Compare Compare Assign Assign Call Assign Call Assign Call Call Call If BoolOp Compare Assign Compare Assign Assign Assign If Compare If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "feed_extra_kwargs",
    "source_code": "def feed_extra_kwargs(self, obj):\n    return {}",
    "docstring": "Return an extra keyword arguments dictionary that is used when initializing the feed generator.",
    "type": "method",
    "file_path": "django\\django\\contrib\\syndication\\views.py",
    "ast_data": "FunctionDef name:feed_extra_kwargs arg:self arg:obj arguments arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "output",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef output(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Any:\n    return args[0]",
    "docstring": "Execute an `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return: Any: The return value referenced by the output node",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:output arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "HasMethods",
    "source_code": "class HasMethods(_Constraint):\n\n    @validate_params({'methods': [str, list]}, prefer_skip_nested_validation=True)\n    def __init__(self, methods):\n        super().__init__()\n        if isinstance(methods, str):\n            methods = [methods]\n        self.methods = methods\n\n    def is_satisfied_by(self, val):\n        return all((callable(getattr(val, method, None)) for method in self.methods))\n\n    def __str__(self):\n        if len(self.methods) == 1:\n            methods = f'{self.methods[0]!r}'\n        else:\n            methods = f'{', '.join([repr(m) for m in self.methods[:-1]])} and {self.methods[-1]!r}'\n        return f'an object implementing {methods}'",
    "docstring": "Constraint representing objects that expose specific methods. It is useful for parameters following a protocol and where we don't want to impose an affiliation to a specific module or class. Parameters ---------- methods : str or list of str The method(s) that the object is expected to expose.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:HasMethods FunctionDef name:__init__ arg:self arg:methods arguments arg arg Call Call If Call Assign Assign Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call Call FunctionDef name:__str__ arg:self arguments arg If Compare Call Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Masking",
    "source_code": "class Masking(Layer):\n\n    def __init__(self, mask_value=0.0, **kwargs):\n        super(Masking, self).__init__(**kwargs)\n        self.supports_masking = True\n        self.mask_value = mask_value\n        self._compute_output_and_mask_jointly = True\n\n    def compute_mask(self, inputs, mask=None):\n        return K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1)\n\n    def call(self, inputs):\n        boolean_mask = K.any(math_ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)\n        outputs = inputs * math_ops.cast(boolean_mask, inputs.dtype)\n        outputs._keras_mask = array_ops.squeeze(boolean_mask, axis=-1)\n        return outputs\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = {'mask_value': self.mask_value}\n        base_config = super(Masking, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Masks a sequence by using a mask value to skip timesteps. For each timestep in the input tensor (dimension #1 in the tensor), if all values in the input tensor at that timestep are equal to , then the timestep will be masked (skipped) in all downstream layers (as long as they support masking). If any downstream layer does not support masking yet receives such an input mask, an exception will be raised. Example: Consider a Numpy data array of shape , to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you lack data for these timesteps. You can: - Set and - Insert a layer with before the LSTM layer: See [the masking and padding guide]( for more details.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:Masking FunctionDef name:__init__ arg:self arg:mask_value arguments arg arg arg Call Call Assign Assign Assign FunctionDef name:compute_mask arg:self arg:inputs arg:mask arguments arg arg arg Return return:yes Call Call FunctionDef name:call arg:self arg:inputs arguments arg arg Assign Call Call Assign Call Assign Call Return return:yes FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "OutputSpec",
    "source_code": "class OutputSpec:\n\n    def get_device(self) -> Optional[torch.device]:\n        raise NotImplementedError(type(self).__name__)\n\n    def storage_size(self) -> int:\n        raise NotImplementedError(type(self).__name__)",
    "docstring": "Abstract base for Layout, MultiOutputLayout, NoneLayout. Represents the memory layout of the output of an Operation.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:OutputSpec FunctionDef name:get_device arg:self arguments arg Raise Call Call FunctionDef name:storage_size arg:self arguments arg Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "pin_memory",
    "source_code": "def pin_memory(self, device: Union[str, torch.device]='cuda'):\n    _warn_typed_storage_removal()\n    return self._new_wrapped_storage(self._untyped_storage.pin_memory(device=device))",
    "docstring": "Copy the CPU TypedStorage to pinned memory, if it's not already pinned. Args: device (str or torch.device): The device to pin memory on (default: ``). This argument is discouraged and subject to deprecated. Returns: A pinned CPU storage.",
    "type": "method",
    "file_path": "pytorch\\torch\\storage.py",
    "ast_data": "FunctionDef name:pin_memory arg:self arg:device arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_check_list_display_links",
    "source_code": "def _check_list_display_links(self, obj):\n    from django.contrib.admin.options import ModelAdmin\n    if obj.list_display_links is None:\n        return []\n    elif not isinstance(obj.list_display_links, (list, tuple)):\n        return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')\n    elif obj.get_list_display.__func__ is ModelAdmin.get_list_display:\n        return list(chain.from_iterable((self._check_list_display_links_item(obj, field_name, 'list_display_links[%d]' % index) for index, field_name in enumerate(obj.list_display_links))))\n    return []",
    "docstring": "Check that list_display_links is a unique subset of list_display.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_list_display_links arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call If Compare Return return:yes Call Call Call Call Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "_VerboseHelper",
    "source_code": "class _VerboseHelper(_Constraint):\n\n    def __init__(self):\n        super().__init__()\n        self._constraints = [Interval(Integral, 0, None, closed='left'), _InstancesOf(bool), _InstancesOf(np.bool_)]\n\n    def is_satisfied_by(self, val):\n        return any((c.is_satisfied_by(val) for c in self._constraints))\n\n    def __str__(self):\n        return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'",
    "docstring": "Helper constraint for the verbose parameter. Convenience class for [Interval(Integral, 0, None, closed=\"left\"), bool, numpy.bool_]",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_VerboseHelper FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "strict_bounds",
    "source_code": "def strict_bounds(lb, ub, keep_feasible, n_vars):\n    strict_lb = np.resize(lb, n_vars).astype(float)\n    strict_ub = np.resize(ub, n_vars).astype(float)\n    keep_feasible = np.resize(keep_feasible, n_vars)\n    strict_lb[~keep_feasible] = -np.inf\n    strict_ub[~keep_feasible] = np.inf\n    return (strict_lb, strict_ub)",
    "docstring": "Remove bounds which are not asked to be kept feasible.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_constraints.py",
    "ast_data": "FunctionDef name:strict_bounds arg:lb arg:ub arg:keep_feasible arg:n_vars arguments arg arg arg arg Assign Call Call Assign Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "serialize_ssh_public_key",
    "source_code": "def serialize_ssh_public_key(public_key: SSHPublicKeyTypes) -> bytes:\n    if isinstance(public_key, dsa.DSAPublicKey):\n        warnings.warn('SSH DSA key support is deprecated and will be removed in a future release', utils.DeprecatedIn40, stacklevel=4)\n    key_type = _get_ssh_key_type(public_key)\n    kformat = _lookup_kformat(key_type)\n    f_pub = _FragList()\n    f_pub.put_sshstr(key_type)\n    kformat.encode_public(public_key, f_pub)\n    pub = binascii.b2a_base64(f_pub.tobytes()).strip()\n    return b''.join([key_type, b' ', pub])",
    "docstring": "One-line public key format for OpenSSH",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:serialize_ssh_public_key arg:public_key arguments arg If Call Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "DatelikeOps",
    "source_code": "class DatelikeOps(DatetimeLikeArrayMixin):\n\n    @Substitution(URL='https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior')\n    def strftime(self, date_format: str) -> npt.NDArray[np.object_]:\n        result = self._format_native_types(date_format=date_format, na_rep=np.nan)\n        if using_string_dtype():\n            from pandas import StringDtype\n            return pd_array(result, dtype=StringDtype(na_value=np.nan))\n        return result.astype(object, copy=False)",
    "docstring": "Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "ClassDef name:DatelikeOps FunctionDef name:strftime arg:self arg:date_format arguments arg arg Assign Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "ignore",
    "source_code": "def ignore(drop=False, **kwargs):\n    if callable(drop):\n        fn = drop\n        fn._torchscript_modifier = FunctionModifiers.IGNORE\n        return fn\n    if not isinstance(drop, bool):\n        raise RuntimeError(f'Argument to @torch.jit.ignore must be a bool or a function but got {drop}')\n    drop_on_export = kwargs.pop('drop_on_export', None)\n    if drop_on_export:\n        warnings.warn('ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function call on compilation. Use torch.jit.unused now. {}', category=FutureWarning)\n        drop = drop_on_export\n    elif drop:\n        warnings.warn('ignore(True) has been deprecated. TorchScript will now drop the function call on compilation. Use torch.jit.unused now. {}', category=FutureWarning)\n\n    def decorator(fn):\n        if drop:\n            fn._torchscript_modifier = FunctionModifiers.UNUSED\n        else:\n            fn._torchscript_modifier = FunctionModifiers.IGNORE\n        return fn\n    return decorator",
    "docstring": "This decorator indicates to the compiler that a function or method should be ignored and left as a Python function. This allows you to leave code in your model that is not yet TorchScript compatible. If called from TorchScript, ignored functions will dispatch the call to the Python interpreter. Models with ignored functions cannot be exported; use :func: instead. Example (using `debugger@ignoredebuggertraining_methodraise`. m.save(\"m.pt\") .. testcleanup:: import os os.remove('m.pt')",
    "type": "function",
    "file_path": "pytorch\\torch\\_jit_internal.py",
    "ast_data": "FunctionDef name:ignore arg:drop arguments arg arg If Call Assign Assign Return return:yes If Call Raise Call Assign Call If Call Assign If Call FunctionDef name:decorator arg:fn arguments arg If Assign Assign Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_rotate_command",
    "source_code": "def _get_rotate_command(lbrt):\n    l, b, r, t = lbrt\n    return f'{l + r:.2f} {0:.2f} translate\\n90 rotate'",
    "docstring": "Return a PostScript 90° rotation command for bounding box *lbrt*=(l, b, r, t).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:_get_rotate_command arg:lbrt arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_make_copy_from_view",
    "source_code": "def _make_copy_from_view(fn):\n    aten_fn = getattr(aten, fn.__name__)\n    annotations = getattr(fn, '__annotations__', {})\n    fn = out_wrapper()(aten_fn)\n\n    @wraps(fn)\n    def _fn(*args, out=None, **kwargs):\n        result = fn(*args, out=out, **kwargs)\n        if out is not None:\n            return result\n        return pytree.tree_map(lambda x: x.clone(memory_format=torch.contiguous_format), result)\n    copy_name = f'{fn.__name__}_copy'\n    _fn.__name__ = copy_name\n    _fn.__annotations__.update(annotations)\n    register_decomposition(getattr(aten, copy_name))(_fn)\n    return _fn",
    "docstring": "Given a view function (e.g. torch.diagonal) generates its copy variant (e.g. torch.diagonal_copy)",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:_make_copy_from_view arg:fn arguments arg Assign Call Assign Call Assign Call Call FunctionDef name:_fn arguments arg arg arg Assign Call If Compare Return return:yes Return return:yes Call arguments arg Call Call Assign Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "skew",
    "source_code": "def skew(a, axis=0, bias=True):\n    a, axis = _chk_asarray(a, axis)\n    mean = a.mean(axis, keepdims=True)\n    m2 = _moment(a, 2, axis, mean=mean)\n    m3 = _moment(a, 3, axis, mean=mean)\n    zero = m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis)) ** 2\n    with np.errstate(all='ignore'):\n        vals = ma.where(zero, 0, m3 / m2 ** 1.5)\n    if not bias and zero is not ma.masked and (m2 is not ma.masked):\n        n = a.count(axis)\n        can_correct = ~zero & (n > 2)\n        if can_correct.any():\n            n = np.extract(can_correct, n)\n            m2 = np.extract(can_correct, m2)\n            m3 = np.extract(can_correct, m3)\n            nval = ma.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2 ** 1.5\n            np.place(vals, can_correct, nval)\n    return vals",
    "docstring": "Computes the skewness of a data set. Parameters ---------- a : ndarray data axis : int or None, optional Axis along which skewness is calculated. Default is 0. If None, compute over the whole array . bias : bool, optional If False, then the calculations are corrected for statistical bias. Returns ------- skewness : ndarray The skewness of values along an axis, returning 0 where all values are equal. Notes ----- For more details about , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:skew arg:a arg:axis arg:bias arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Compare Call Call With Call Assign Call If BoolOp Compare Compare Assign Call Assign Compare If Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_make_eager_execution_function",
    "source_code": "def _make_eager_execution_function(model, mode):\n\n    def _per_replica_function(model):\n        f = model._make_execution_function(mode)\n        return (f.inputs, f.outputs)\n    strategy = model._distribution_strategy\n    global_graph = backend.get_graph()\n    with global_graph.as_default(), strategy.scope():\n        with backend._scratch_graph(global_graph):\n            grouped = strategy.extended.call_for_each_replica(_per_replica_function, args=(get_distributed_model(model, mode),))\n            grouped_inputs, grouped_outputs = grouped\n            all_inputs, all_outputs, _, _ = unwrap_values(strategy, grouped_inputs, grouped_outputs, with_loss_tensor=mode != ModeKeys.PREDICT)\n        return backend.function(all_inputs, all_outputs, name='eager_distributed_{}_function'.format(mode))",
    "docstring": "Makes function to run one step of distributed model eager execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_eager_execution_function arg:model arg:mode arguments arg arg FunctionDef name:_per_replica_function arg:model arguments arg Assign Call Return return:yes Assign Assign Call With Call Call With Call Assign Call Call Assign Assign Call Compare Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "env",
    "source_code": "@property\ndef env(self) -> BuildEnvironment:\n    return self.document.settings.env",
    "docstring": "Reference to the :class: object.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "FunctionDef name:env arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "check_setitem_lengths",
    "source_code": "def check_setitem_lengths(indexer, value, values) -> bool:\n    no_op = False\n    if isinstance(indexer, (np.ndarray, list)):\n        if is_list_like(value):\n            if len(indexer) != len(value) and values.ndim == 1:\n                if isinstance(indexer, list):\n                    indexer = np.array(indexer)\n                if not (isinstance(indexer, np.ndarray) and indexer.dtype == np.bool_ and (indexer.sum() == len(value))):\n                    raise ValueError('cannot set using a list-like indexer with a different length than the value')\n            if not len(indexer):\n                no_op = True\n    elif isinstance(indexer, slice):\n        if is_list_like(value):\n            if len(value) != length_of_indexer(indexer, values) and values.ndim == 1:\n                raise ValueError('cannot set using a slice indexer with a different length than the value')\n            if not len(value):\n                no_op = True\n    return no_op",
    "docstring": "Validate that value and indexer are the same length. An special-case is allowed for when the indexer is a boolean array and the number of true values equals the length of ``. In this case, no exception is raised. Parameters ---------- indexer : sequence Key for the setitem. value : array-like Value for the setitem. values : array-like Values being set into. Returns ------- bool Whether this is an empty listlike setting which is a no-op. Raises ------ ValueError When the indexer is an ndarray or list and the lengths don't match.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexers\\utils.py",
    "ast_data": "FunctionDef name:check_setitem_lengths arg:indexer arg:value arg:values arguments arg arg arg Assign If Call If Call If BoolOp Compare Call Call Compare If Call Assign Call If BoolOp Call Compare Compare Call Call Raise Call If Call Assign If Call If Call If BoolOp Compare Call Call Compare Raise Call If Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_list_max_show_all",
    "source_code": "def _check_list_max_show_all(self, obj):\n    if not isinstance(obj.list_max_show_all, int):\n        return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')\n    else:\n        return []",
    "docstring": "Check that list_max_show_all is an integer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_list_max_show_all arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "get_error",
    "source_code": "def get_error(self):\n    self._check_status()\n    return (self._error_code, self._error_message)",
    "docstring": "Returns (TF Error Code, Error Message) from RPC Response. This call will block for RPC result.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\rpc\\rpc_ops.py",
    "ast_data": "FunctionDef name:get_error arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reopen",
    "source_code": "def reopen(self):\n    if self._closed:\n        self._closed = False\n        self._session.run(self._init_op)",
    "docstring": "Reopens the EventFileWriter. Can be called after to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py",
    "ast_data": "FunctionDef name:reopen arg:self arguments arg If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "choose_from_datasets_v2",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.choose_from_datasets(...)` instead. Note that, unlike the experimental endpoint, the non-experimental endpoint sets `stop_on_empty_dataset=True` by default. You should set this argument explicitly in case you would like to match the behavior of the experimental endpoint.')\n@tf_export('data.experimental.choose_from_datasets', v1=[])\ndef choose_from_datasets_v2(datasets, choice_dataset, stop_on_empty_dataset=False):\n    return dataset_ops.Dataset.choose_from_datasets(datasets=datasets, choice_dataset=choice_dataset, stop_on_empty_dataset=stop_on_empty_dataset)",
    "docstring": "Creates a dataset that deterministically chooses elements from . For example, given the following datasets: The elements of will be: Args: datasets: A non-empty list of objects with compatible structure. choice_dataset: A of scalar tensors between and . stop_on_empty_dataset: If , selection stops if it encounters an empty dataset. If , it skips empty datasets. It is recommended to set it to . Otherwise, the selected elements start off as the user intends, but may change as input datasets become empty. This can be difficult to detect since the dataset starts off looking correct. Default to for backward compatibility. Returns: A dataset that interleaves elements from according to the values of . Raises: TypeError: If or has the wrong type. ValueError: If is empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\interleave_ops.py",
    "ast_data": "FunctionDef name:choose_from_datasets_v2 arg:datasets arg:choice_dataset arg:stop_on_empty_dataset arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, vcenter=0, halfrange=None, clip=False):\n    super().__init__(vmin=None, vmax=None, clip=clip)\n    self._vcenter = vcenter\n    self.halfrange = halfrange",
    "docstring": "Normalize symmetrical data around a center (0 by default). Unlike , applies an equal rate of change around the center. Useful when mapping symmetrical data around a conceptual center e.g., data that range from -2 to 4, with 0 as the midpoint, and with equal rates of change around that midpoint. Parameters ---------- vcenter : float, default: 0 The data value that defines ``. This behavior is usually desirable, as colormaps can mark these *under* and *over* values with specific colors. If clipping is on, values below *vmin* are mapped to 0 and values above *vmax* are mapped to 1. Such values become indistinguishable from regular boundary values, which may cause misinterpretation of the data. Examples -------- This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0 (assuming equal rates of change above and below 0.0): >>> import matplotlib.colors as mcolors >>> norm = mcolors.CenteredNorm(halfrange=4.0) >>> data = [-2., 0., 4.] >>> norm(data) array([0.25, 0.5 , 1. ])",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:vcenter arg:halfrange arg:clip arguments arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "experimental_between_graph",
    "source_code": "@property\ndef experimental_between_graph(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Whether the strategy uses between-graph replication or not. This is expected to return a constant value that will not be changed throughout its life cycle.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_between_graph arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "is_url",
    "source_code": "def is_url(self, template):\n    if ':' not in template:\n        return False\n    scheme = template.split(':', 1)[0].lower()\n    return scheme in self.url_schemes",
    "docstring": "Return True if the name looks like a URL.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\templates.py",
    "ast_data": "FunctionDef name:is_url arg:self arg:template arguments arg arg If Compare Return return:yes Assign Call Call Return return:yes Compare"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, num_dims: int, num_freqs: int, log_space: bool=False) -> None:\n    super().__init__()\n    self._num_dims = num_dims\n    self._embed_fns = [lambda x: x]\n    freq_bands: Tensor\n    if log_space:\n        freq_bands = 2.0 ** torch.linspace(0.0, num_freqs - 1, num_freqs)\n    else:\n        freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** (num_freqs - 1), num_freqs)\n    for freq in freq_bands:\n        self._embed_fns.append(partial(_torch_sin, freq=freq))\n        self._embed_fns.append(partial(_torch_cos, freq=freq))\n    self._num_encoded_dims = self._num_dims * len(self._embed_fns)",
    "docstring": "Initialize positional encoder. Args: num_dims: Number of input dimensions (channels): int num_freqs: Number of frequency bands for encoding span: int log_space: Whether frequency sampling should be log spaced: bool",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\positional_encoder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_dims arg:num_freqs arg:log_space arguments arg arg arg arg Call Call Assign Assign arguments arg If Assign Call Assign Call For Call Call Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_signature_def_map",
    "source_code": "def get_signature_def_map(saved_model_dir, tag_set):\n    meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n    return meta_graph.signature_def",
    "docstring": "Gets SignatureDef map from a MetaGraphDef in a SavedModel. Returns the SignatureDef map for the given tag-set in the SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. Returns: A SignatureDef map that maps from string keys to SignatureDefs.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:get_signature_def_map arg:saved_model_dir arg:tag_set arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_source_fields",
    "source_code": "def get_source_fields(self):\n    return [e._output_field_or_none for e in self.get_source_expressions()]",
    "docstring": "Return the underlying field types used by this aggregate.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\expressions.py",
    "ast_data": "FunctionDef name:get_source_fields arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "edit_outer_margin_mins",
    "source_code": "def edit_outer_margin_mins(self, margin, ss):\n    self.edit_margin_min('left', margin['left'], ss.colspan.start)\n    self.edit_margin_min('leftcb', margin['leftcb'], ss.colspan.start)\n    self.edit_margin_min('right', margin['right'], ss.colspan.stop - 1)\n    self.edit_margin_min('rightcb', margin['rightcb'], ss.colspan.stop - 1)\n    self.edit_margin_min('top', margin['top'], ss.rowspan.start)\n    self.edit_margin_min('topcb', margin['topcb'], ss.rowspan.start)\n    self.edit_margin_min('bottom', margin['bottom'], ss.rowspan.stop - 1)\n    self.edit_margin_min('bottomcb', margin['bottomcb'], ss.rowspan.stop - 1)",
    "docstring": "Edit all four margin minimums in one statement. Parameters ---------- margin : dict size of margins in a dict with keys 'left', 'right', 'bottom', 'top' ss : SubplotSpec defines the subplotspec these margins should be applied to",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:edit_outer_margin_mins arg:self arg:margin arg:ss arguments arg arg arg Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_tensor_binary_crossentropy",
    "source_code": "@dispatch.dispatch_for_types(binary_crossentropy, ragged_tensor.RaggedTensor)\ndef _ragged_tensor_binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n    fn = functools.partial(binary_crossentropy, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n    return _ragged_tensor_apply_loss(fn, y_true, y_pred)",
    "docstring": "Implements support for handling RaggedTensors. Args: y_true: Tensor of one-hot true targets. y_pred: Tensor of predicted targets. from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. If > then smooth the labels. For example, if , use for non-target labels and for target labels. axis: Axis along which to compute crossentropy. Returns: Binary crossentropy loss value. Expected shape: (batch, sequence_len) with sequence_len being variable per batch. Return shape: (batch,); returns the per batch mean of the loss values. When used by BinaryCrossentropy() with the default reduction (SUM_OVER_BATCH_SIZE), the reduction averages the per batch losses over the number of batches.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:_ragged_tensor_binary_crossentropy arg:y_true arg:y_pred arg:from_logits arg:label_smoothing arg:axis arguments arg arg arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_weighted_sum",
    "source_code": "def _create_weighted_sum(column, builder, units, sparse_combiner, weight_collections, trainable, weight_var=None):\n    if isinstance(column, _CategoricalColumn):\n        return _create_categorical_column_weighted_sum(column=column, builder=builder, units=units, sparse_combiner=sparse_combiner, weight_collections=weight_collections, trainable=trainable, weight_var=weight_var)\n    else:\n        return _create_dense_column_weighted_sum(column=column, builder=builder, units=units, weight_collections=weight_collections, trainable=trainable, weight_var=weight_var)",
    "docstring": "Creates a weighted sum for a dense/categorical column for linear_model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_create_weighted_sum arg:column arg:builder arg:units arg:sparse_combiner arg:weight_collections arg:trainable arg:weight_var arguments arg arg arg arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "can_inline_constant",
    "source_code": "@staticmethod\ndef can_inline_constant(t: torch.Tensor) -> bool:\n    return len(t.shape) == 1 and t.shape[0] <= 8",
    "docstring": "True if this is a small constant attr that will be inlined.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\graph.py",
    "ast_data": "FunctionDef name:can_inline_constant arg:t arguments arg Return return:yes BoolOp Compare Call Compare"
  },
  {
    "library": "scipy",
    "name": "f3",
    "source_code": "def f3(x):\n    return x * (x - 1.0) * (x - 2.0) * (x - 3.0)",
    "docstring": "A quartic with roots at 0, 1, 2 and 3",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:f3 arg:x arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SlicingOpLambda",
    "source_code": "class SlicingOpLambda(TFOpLambda):\n\n    @trackable.no_automatic_dependency_tracking\n    def __init__(self, function, **kwargs):\n        super(SlicingOpLambda, self).__init__(function, **kwargs)\n        original_call = self.call\n\n        def _call_wrapper(*args, **kwargs):\n            new_args = []\n            for arg in args:\n                arg = _dict_to_slice(arg)\n                if isinstance(arg, (list, tuple)):\n                    new_arg = []\n                    for sub_arg in arg:\n                        new_arg.append(_dict_to_slice(sub_arg))\n                    arg = new_arg\n                new_args.append(arg)\n            new_kwargs = {}\n            for key, value in kwargs.items():\n                value = _dict_to_slice(value)\n                if isinstance(value, (list, tuple)):\n                    new_value = []\n                    for v in value:\n                        new_value.append(_dict_to_slice(v))\n                    value = new_value\n                new_kwargs[key] = value\n            return original_call(*new_args, **new_kwargs)\n        self.call = tf_decorator.make_decorator(original_call, _call_wrapper)",
    "docstring": "Wraps TF API symbols in a object. It is inserted by the Functional API construction whenever users call a supported TF symbol on KerasTensors. Like Lambda layers, this layer tries to raise warnings when it detects users explicitly use variables in the call. (To let them know that the layer will not capture the variables). This is useful in the case where users do something like: x = keras.Input(...) y = tf.Variable(...) out = x * tf_variable",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:SlicingOpLambda FunctionDef name:__init__ arg:self arg:function arguments arg arg arg Call Call Assign FunctionDef name:_call_wrapper arguments arg arg Assign For Assign Call If Call Assign For Call Call Assign Call Assign For Call Assign Call If Call Assign For Call Call Assign Assign Return return:yes Call Assign Call"
  },
  {
    "library": "numpy",
    "name": "packbits",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)\ndef packbits(a, axis=None, bitorder='big'):\n    return (a,)",
    "docstring": "packbits(a, /, axis=None, bitorder='big') Packs the elements of a binary-valued array into bits in a uint8 array. The result is padded to full bytes by inserting zero bits at the end. Parameters ---------- a : array_like An array of integers or booleans whose elements should be packed to bits. axis : int, optional The dimension over which bit-packing is done. `packedaxis` is None, in which case the output is 1-D). See Also -------- unpackbits: Unpacks elements of a uint8 array into a binary-valued output array. Examples -------- >>> import numpy as np >>> a = np.array([[[1,0,1], ... [0,1,0]], ... [[1,1,0], ... [0,0,1]]]) >>> b = np.packbits(a, axis=-1) >>> b array([[[160], [ 64]], [[192], [ 32]]], dtype=uint8) Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, and 32 = 0010 0000.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:packbits arg:a arg:axis arg:bitorder arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "lang_stats",
    "source_code": "def lang_stats(resources=None, languages=None, verbosity=0):\n    locale_dirs = _get_locale_dirs(resources)\n    for name, dir_ in locale_dirs:\n        print(\"\\nShowing translations stats for '%s':\" % name)\n        langs = sorted((d for d in os.listdir(dir_) if not d.startswith('_')))\n        for lang in langs:\n            if languages and lang not in languages:\n                continue\n            po_path = '{path}/{lang}/LC_MESSAGES/django{ext}.po'.format(path=dir_, lang=lang, ext='js' if name.endswith('-js') else '')\n            p = run(['msgfmt', '-vc', '-o', '/dev/null', po_path], capture_output=True, env={'LANG': 'C'}, encoding='utf-8', verbosity=verbosity)\n            if p.returncode == 0:\n                print('%s: %s' % (lang, p.stderr.strip()))\n            else:\n                print('Errors happened when checking %s translation for %s:\\n%s' % (lang, name, p.stderr))",
    "docstring": "Output language statistics of committed translation files for each Django catalog. If resources is provided, it should be a list of translation resource to limit the output (e.g. ['core', 'gis']).",
    "type": "function",
    "file_path": "django\\scripts\\manage_translations.py",
    "ast_data": "FunctionDef name:lang_stats arg:resources arg:languages arg:verbosity arguments arg arg arg Assign Call For Call Assign Call Call Call For If BoolOp Compare Assign Call Call Assign Call If Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_map_blocks_to_lifted_attrs",
    "source_code": "def _map_blocks_to_lifted_attrs(entry):\n    arguments: set[str] = set()\n    for node in entry.nodes():\n        for block in node.blocks():\n            arguments = arguments.union(_map_blocks_to_lifted_attrs(block))\n        if node.kind() == 'prim::GetAttr':\n            irv_name = node.output().debugName()\n            if irv_name not in set(node_to_parent_map.values()):\n                arguments.add(construct_fqn(irv_name, node_to_parent_map, node_to_attr_name))\n    if not isinstance(entry, torch._C.Graph):\n        blocks_to_lifted_attrs[entry] = arguments\n    return arguments",
    "docstring": "Walk the graph in a bottom-up fashion to build the expected to be lifted arguments for each block.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\converter.py",
    "ast_data": "FunctionDef name:_map_blocks_to_lifted_attrs arg:entry arguments arg Call For Call For Call Assign Call Call If Compare Call Assign Call Call If Compare Call Call Call Call If Call Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_version_predates",
    "source_code": "def _version_predates(lib: ModuleType, version: str) -> bool:\n    return Version(lib.__version__) < Version(version)",
    "docstring": "Helper function for checking version compatibility.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:_version_predates arg:lib arg:version arguments arg arg Return return:yes Compare Call Call"
  },
  {
    "library": "django",
    "name": "validate",
    "source_code": "def validate(self):\n    capi.srs_validate(self.ptr)",
    "docstring": "Check to see if the given spatial reference is valid.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:validate arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "range_dimension",
    "source_code": "@property\ndef range_dimension(self):\n    if self.shape.dims:\n        return self.shape.dims[-2]\n    else:\n        return tensor_shape.Dimension(None)",
    "docstring": "Dimension (in the sense of vector spaces) of the range of this operator. If this operator acts like the batch matrix with , then this returns . Returns: object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:range_dimension arg:self arguments arg If Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "hg",
    "source_code": "def hg(cfg: HourglassConfig) -> HourglassNet:\n    return HourglassNet(Bottleneck2D, head=cfg.head, depth=cfg.depth, num_stacks=cfg.num_stacks, num_blocks=cfg.num_blocks, num_classes=cfg.num_classes, input_channels=cfg.input_channels)",
    "docstring": "Create HourglassNet.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\sold2\\backbones.py",
    "ast_data": "FunctionDef name:hg arg:cfg arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "@dispatch.dispatch_for_types(array_ops.size, StructuredTensor)\ndef size(input, name=None, out_type=None):\n    if out_type is None:\n        if flags.config().tf_shape_default_int64.value():\n            out_type = dtypes.int64\n        else:\n            out_type = dtypes.int32\n    with ops.name_scope(name, 'size', [input]) as name:\n        if not input.row_partitions:\n            if input.nrows() is not None:\n                return math_ops.cast(input.nrows(), out_type)\n            else:\n                return math_ops.cast(1, out_type)\n        nvals = input.row_partitions[-1].nvals()\n        if nvals is None or out_type is None:\n            return nvals\n        return math_ops.cast(nvals, dtype=out_type)",
    "docstring": "Returns the size of a tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py",
    "ast_data": "FunctionDef name:size arg:input arg:name arg:out_type arguments arg arg arg If Compare If Call Call Assign Assign With Call If If Compare Call Return return:yes Call Call Return return:yes Call Assign Call If BoolOp Compare Compare Return return:yes Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "dont_skip_tracing",
    "source_code": "def dont_skip_tracing(fn=None):\n    ctx = patch_dynamo_config(dont_skip_tracing=True)\n    if fn:\n        return ctx(fn)\n    return ctx",
    "docstring": "Context manager/decorator to trace into functions intentionally marked by developers to be skipped when tracing. This decorator will also apply to recursively invoked functions.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:dont_skip_tracing arg:fn arguments arg Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_merge_with",
    "source_code": "def _merge_with(self, other: 'DynamicRaggedShape') -> 'DynamicRaggedShape':\n    max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n    a = self._with_num_row_partitions(max_num_row_partitions)\n    b = other._with_num_row_partitions(max_num_row_partitions)\n    new_row_partitions = [rp_a._merge_precomputed_encodings(rp_b) for rp_a, rp_b in zip(a._row_partitions, b._row_partitions)]\n    new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n    new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n    new_inner_shape = a._inner_shape\n    return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, True, new_static_inner_shape)",
    "docstring": "Merge two shapes that are equal modulo num_row_partitions. The resulting num_row_partitions is the maximum of the two num_row_partitions. Args: other: a DynamicRaggedShape representing the same shape with a possibly different number of row partitions. Returns: A DynamicRaggedShape with the same shape and the maximum of the num_row_partitions of the two shapes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_merge_with arg:self arg:other arguments arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Compare Assign Call Assign Return return:yes Call"
  },
  {
    "library": "django",
    "name": "getlist",
    "source_code": "def getlist(self, key, default=None):\n    return self._getlist(key, default, force_list=True)",
    "docstring": "Return the list of values for the key. If key doesn't exist, return a default value.",
    "type": "method",
    "file_path": "django\\django\\utils\\datastructures.py",
    "ast_data": "FunctionDef name:getlist arg:self arg:key arg:default arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "all",
    "source_code": "def all(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs) -> np.bool_ | NAType:\n    nv.validate_all((), kwargs)\n    values = self._data.copy()\n    np.putmask(values, self._mask, self.dtype._truthy_value)\n    result = values.all(axis=axis)\n    if skipna:\n        return result\n    elif not result or len(self) == 0 or (not self._mask.any()):\n        return result\n    else:\n        return self.dtype.na_value",
    "docstring": "Return whether all elements are truthy. Returns True unless there is at least one element that is falsey. By default, NAs are skipped. If `Kleene logic skipnaskipnapandas.NA` is True or False influences the result): >>> pd.array([True, True, pd.NA]).all(skipna=False) >>> pd.array([1, 1, pd.NA]).all(skipna=False) >>> pd.array([True, False, pd.NA]).all(skipna=False) np.False_ >>> pd.array([1, 0, pd.NA]).all(skipna=False) np.False_",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\masked.py",
    "ast_data": "FunctionDef name:all arg:self arguments arg arg arg arg Call Assign Call Call Assign Call If Return return:yes If BoolOp Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "buffers",
    "source_code": "def buffers(self, recurse: bool=True) -> Iterator[Tensor]:\n    for _, buf in self.named_buffers(recurse=recurse):\n        yield buf",
    "docstring": "Return an iterator over module buffers. Args: recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Yields: torch.Tensor: module buffer Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for buf in model.buffers(): >>> print(type(buf), buf.size()) (20L,) (20L, 1L, 5L, 5L)",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:buffers arg:self arg:recurse arguments arg arg For Call"
  },
  {
    "library": "matplotlib",
    "name": "Add",
    "source_code": "class Add(_Base):\n\n    def __init__(self, a, b):\n        self._a = a\n        self._b = b\n\n    def get_size(self, renderer):\n        a_rel_size, a_abs_size = self._a.get_size(renderer)\n        b_rel_size, b_abs_size = self._b.get_size(renderer)\n        return (a_rel_size + b_rel_size, a_abs_size + b_abs_size)",
    "docstring": "Sum of two sizes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:Add FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg Assign Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_should_act_as_resource_variable",
    "source_code": "def _should_act_as_resource_variable(self):\n    pass",
    "docstring": "Pass resource_variable_ops.is_resource_variable check.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_should_act_as_resource_variable arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "_list_of_dict_to_arrays",
    "source_code": "def _list_of_dict_to_arrays(data: list[dict], columns: Index | None) -> tuple[np.ndarray, Index]:\n    if columns is None:\n        gen = (list(x.keys()) for x in data)\n        sort = not any((isinstance(d, dict) for d in data))\n        pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)\n        columns = ensure_index(pre_cols)\n    data = [d if type(d) is dict else dict(d) for d in data]\n    content = lib.dicts_to_array(data, list(columns))\n    return (content, columns)",
    "docstring": "Convert list of dicts to numpy arrays if is not passed, column names are inferred from the records - for OrderedDict and dicts, the column names match the key insertion-order from the first record to the last. - For other kinds of dict-likes, the keys are lexically sorted. Parameters ---------- data : iterable collection of records (OrderedDict, dict) columns: iterables or None Returns ------- content : np.ndarray[object, ndim=2] columns : Index",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\construction.py",
    "ast_data": "FunctionDef name:_list_of_dict_to_arrays arg:data arg:columns arguments arg arg If Compare Assign Call Call Assign Call Call Assign Call Assign Call Assign Compare Call Call Assign Call Call Return return:yes"
  },
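This internal helper backs ``DataFrame`` construction from a list of records; the behaviour it implements is visible through the public constructor:

```python
import pandas as pd

records = [{"a": 1, "b": 2}, {"b": 3, "c": 4}]
df = pd.DataFrame(records)  # columns inferred from key insertion order across records
print(df.columns.tolist())  # ['a', 'b', 'c']
print(df)
#      a  b    c
# 0  1.0  2  NaN
# 1  NaN  3  4.0
```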
  {
    "library": "pandas",
    "name": "_identify_group",
    "source_code": "def _identify_group(self, key: str, append: bool) -> Node:\n    group = self.get_node(key)\n    assert self._handle is not None\n    if group is not None and (not append):\n        self._handle.remove_node(group, recursive=True)\n        group = None\n    if group is None:\n        group = self._create_nodes_and_group(key)\n    return group",
    "docstring": "Identify HDF5 group based on key, delete/create group if needed.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_identify_group arg:self arg:key arg:append arguments arg arg arg Assign Call Compare If BoolOp Compare Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "now",
    "source_code": "def now():\n    return datetime.now(tz=UTC if settings.USE_TZ else None)",
    "docstring": "Return an aware or naive datetime.datetime, depending on settings.USE_TZ.",
    "type": "function",
    "file_path": "django\\django\\utils\\timezone.py",
    "ast_data": "FunctionDef name:now arguments Return return:yes Call"
  },
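A minimal sketch of the two behaviours; the standalone ``settings.configure`` call here is for illustration only:

```python
from django.conf import settings
from django.utils import timezone

settings.configure(USE_TZ=True)  # illustrative standalone setup
aware = timezone.now()
print(aware.tzinfo)              # UTC -- aware datetime because USE_TZ is True
# With USE_TZ=False, timezone.now() would return a naive datetime instead
```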
  {
    "library": "pytorch",
    "name": "stream",
    "source_code": "def stream(stream: Optional['torch.mtia.Stream']) -> StreamContext:\n    return StreamContext(stream)",
    "docstring": "Wrap around the Context-manager StreamContext that selects a given stream. Arguments: stream (Stream): selected stream. This manager is a no-op if it's ``. .. note:: In eager mode stream is of type Stream class while in JIT it doesn't support torch.mtia.stream",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:stream arg:stream arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tuple",
    "source_code": "@tf_export(v1=['tuple'])\n@dispatch.add_dispatch_support\ndef tuple(tensors, name=None, control_inputs=None):\n    if context.executing_eagerly():\n        return tensors\n    with ops.name_scope(name, 'tuple', tensors) as name:\n        tensors = [t if isinstance(t, ops.Operation) or tensor_util.is_tf_type(t) or t is None else ops.convert_to_tensor(t) for t in tensors]\n        gating_ops = [t if isinstance(t, ops.Operation) else t.op for t in tensors if t is not None]\n        if control_inputs:\n            for c in control_inputs:\n                if isinstance(c, tensor_lib.Tensor):\n                    c = c.op\n                elif not isinstance(c, ops.Operation):\n                    raise TypeError(f\"'control_inputs' must only contain Operation or Tensor. Received: {type(c)}\")\n                gating_ops.append(c)\n        gating_ops = sorted(set(gating_ops), key=lambda op: op._id)\n        if not gating_ops:\n            raise ValueError(f\"'tensors' must have at least one Tensor. Received: {tensors}.\")\n        gate = group(*gating_ops)\n        tpl = []\n        for t in tensors:\n            if tensor_util.is_tf_type(t):\n                tpl.append(with_dependencies([gate], t))\n            elif isinstance(t, ops.Operation):\n                with ops.control_dependencies([gate]):\n                    tpl.append(group(t))\n            else:\n                tpl.append(None)\n        return tpl",
    "docstring": "Group tensors together. This creates a tuple of tensors with the same values as the argument, except that the value of each tensor is only returned after the values of all tensors have been computed. contains additional ops that have to finish before this op finishes, but whose outputs are not returned. This can be used as a \"join\" mechanism for parallel computations: all the argument tensors can be computed in parallel, but the values of any tensor returned by are only available after all the parallel computations are done. See also and . Args: tensors: A list of s or , some entries can be . name: (optional) A name to use as a for the operation. control_inputs: List of additional ops to finish before returning. Returns: Same as . Raises: ValueError: If does not contain any or . TypeError: If is not a list of or objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:tuple arg:tensors arg:name arg:control_inputs arguments arg arg arg If Call Return return:yes With Call Assign BoolOp Call Call Compare Call Assign Call Compare If For If Call Assign If Call Raise Call Call Call Assign Call Call arguments arg If Raise Call Assign Call Assign For If Call Call Call If Call With Call Call Call Call Return return:yes Call"
  },
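A minimal graph-mode sketch of ``tf.tuple`` (as the code above shows, the v1 API is a no-op under eager execution):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
a = tf.constant(1.0)
b = tf.constant(2.0)
# c and d are only returned once both a and b have been computed
c, d = tf.tuple([a, b])
with tf.Session() as sess:
    print(sess.run([c, d]))  # [1.0, 2.0]
```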
  {
    "library": "pandas",
    "name": "f",
    "source_code": "def f(self, node, *args, **kwargs):\n    return partial(op_class, op_symbol, *args, **kwargs)",
    "docstring": "Return a partial function with an Op subclass with an operator already passed. Returns ------- callable",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:f arg:self arg:node arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "use_mem_pool",
    "source_code": "@contextlib.contextmanager\ndef use_mem_pool(pool: MemPool, device: 'Device'=None):\n    ctx = MemPoolContext(pool)\n    device_index = torch.cuda.current_device() if device is None else _get_device_index(device)\n    _cuda_beginAllocateCurrentThreadToPool(device_index, pool.id)\n    try:\n        yield\n    finally:\n        _cuda_endAllocateToPool(device_index, pool.id)\n        _cuda_releasePool(device_index, pool.id)\n        del ctx",
    "docstring": "A context manager that routes allocations to a given pool. Args: pool(torch.cuda.MemPool): a MemPool object to be made active so that allocations route to this pool. device (torch.device or int, optional): selected device. Uses MemPool on the current device, given by :func:, if :attr: is `` (default). .. note:: This context manager makes only current thread's allocations route to the given pool. If a new thread is spawned inside the context manager (e.g. by calling backward) the allocations in that thread will not route to the given pool.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:use_mem_pool arg:pool arg:device arguments arg arg Assign Call Assign Compare Call Call Call Try Call Call"
  },
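A minimal sketch, assuming a CUDA build of PyTorch recent enough to expose ``torch.cuda.MemPool``:

```python
import torch

pool = torch.cuda.MemPool()  # a dedicated allocation pool
with torch.cuda.use_mem_pool(pool):
    x = torch.randn(1024, device="cuda")  # routed to `pool` (current thread only)
y = torch.randn(1024, device="cuda")      # back to the default allocator pool
```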
  {
    "library": "tensorflow",
    "name": "parse_expression",
    "source_code": "def parse_expression(src):\n    src = STANDARD_PREAMBLE + src.strip()\n    node = parse(src, preamble_len=STANDARD_PREAMBLE_LEN, single_node=True)\n    if __debug__:\n        if not isinstance(node, gast.Expr):\n            raise ValueError('expected exactly one node of type Expr, got {}'.format(node))\n    return node.value",
    "docstring": "Returns the AST of given identifier. Args: src: A piece of code that represents a single Python expression Returns: A gast.AST object. Raises: ValueError: if src does not consist of a single Expression.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py",
    "ast_data": "FunctionDef name:parse_expression arg:src arguments arg Assign Call Assign Call If If Call Raise Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_builder",
    "source_code": "def add_builder(self, builder: type[Builder], override: bool=False) -> None:\n    self.registry.add_builder(builder, override=override)",
    "docstring": "Register a new builder. :param builder: A builder class :param override: If true, install the builder forcedly even if another builder is already installed as the same name .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_builder arg:self arg:builder arg:override arguments arg arg arg Call"
  },
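A sketch of how an extension would register a builder in its ``setup()`` hook; ``MyBuilder`` and its ``name`` are hypothetical:

```python
from sphinx.application import Sphinx
from sphinx.builders import Builder

class MyBuilder(Builder):
    name = "mybuilder"  # hypothetical name used with `sphinx-build -b mybuilder`
    # real builders also implement get_outdated_docs, write_doc, etc.

def setup(app: Sphinx):
    app.add_builder(MyBuilder)
    return {"version": "0.1"}
```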
  {
    "library": "authlib",
    "name": "validate_request_object_encryption_alg",
    "source_code": "def validate_request_object_encryption_alg(self):\n    self._validate_claim_value('request_object_encryption_alg')",
    "docstring": "JWE [JWE] alg algorithm [JWA] the RP is declaring that it may use for encrypting Request Objects sent to the OP. This parameter SHOULD be included when symmetric encryption will be used, since this signals to the OP that a client_secret value needs to be returned from which the symmetric key will be derived, that might not otherwise be returned. The RP MAY still use other supported encryption algorithms or send unencrypted Request Objects, even when this parameter is present. If both signing and encryption are requested, the Request Object will be signed then encrypted, with the result being a Nested JWT, as defined in [JWT]. The default, if omitted, is that the RP is not declaring whether it might encrypt any Request Objects.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_request_object_encryption_alg arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "fetch_args_kwargs_from_env",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef fetch_args_kwargs_from_env(self, n: Node) -> tuple[tuple, dict]:\n    args = self.map_nodes_to_values(n.args, n)\n    assert isinstance(args, tuple)\n    kwargs = self.map_nodes_to_values(n.kwargs, n)\n    assert isinstance(kwargs, dict)\n    return (args, kwargs)",
    "docstring": "Fetch the concrete values of ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:fetch_args_kwargs_from_env arg:self arg:n arguments arg arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "all_paths",
    "source_code": "def all_paths(self, src: str, dst: str) -> str:\n    return self.dependency_graph.all_paths(src, dst)",
    "docstring": "Return a dot representation of the subgraph that has all paths from src to dst. Returns: A dot representation containing all paths from src to dst. (",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "FunctionDef name:all_paths arg:self arg:src arg:dst arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_group_permissions",
    "source_code": "def get_group_permissions(self, user_obj, obj=None):\n    return self._get_permissions(user_obj, obj, 'group')",
    "docstring": "Return a set of permission strings the user has from the groups they belong.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\backends.py",
    "ast_data": "FunctionDef name:get_group_permissions arg:self arg:user_obj arg:obj arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_get_validation_exclusions",
    "source_code": "def _get_validation_exclusions(self):\n    exclude = set()\n    for f in self.instance._meta.fields:\n        field = f.name\n        if field not in self.fields:\n            exclude.add(f.name)\n        elif self._meta.fields and field not in self._meta.fields:\n            exclude.add(f.name)\n        elif self._meta.exclude and field in self._meta.exclude:\n            exclude.add(f.name)\n        elif field in self._errors:\n            exclude.add(f.name)\n        else:\n            form_field = self.fields[field]\n            field_value = self.cleaned_data.get(field)\n            if not f.blank and (not form_field.required) and (field_value in form_field.empty_values):\n                exclude.add(f.name)\n    return exclude",
    "docstring": "For backwards-compatibility, exclude several types of fields from model validation. See tickets #12507, #12521, #12553.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:_get_validation_exclusions arg:self arguments arg Assign Call For Assign If Compare Call If BoolOp Compare Call If BoolOp Compare Call If Compare Call Assign Assign Call If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scatter_mul",
    "source_code": "def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n    raise NotImplementedError",
    "docstring": "Multiply this variable by . Args: sparse_delta: to multiply this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:scatter_mul arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "_quant_node_constraint",
    "source_code": "def _quant_node_constraint(n: Node) -> bool:\n    return n.op == 'call_function' and n.target in _QUANT_OPS",
    "docstring": "If there is any pure ops between get_attr and quantize op they will be const propagated e.g. get_attr(weight) -> transpose -> quantize -> dequantize* (Note: dequantize op is not going to be constant propagated) This filter is added because we don't want to constant fold the things that are not related to quantization",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_pt2e.py",
    "ast_data": "FunctionDef name:_quant_node_constraint arg:n arguments arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, num_parallel_reads=None, name=None):\n    filenames = _create_or_validate_filenames_dataset(filenames, name=name)\n    self._filenames = filenames\n    self._record_bytes = record_bytes\n    self._header_bytes = header_bytes\n    self._footer_bytes = footer_bytes\n    self._buffer_size = buffer_size\n    self._compression_type = compression_type\n\n    def creator_fn(filename):\n        return _FixedLengthRecordDataset(filename, record_bytes, header_bytes, footer_bytes, buffer_size, compression_type, name=name)\n    self._impl = _create_dataset_reader(creator_fn, filenames, num_parallel_reads, name=name)\n    variant_tensor = self._impl._variant_tensor\n    super(FixedLengthRecordDatasetV2, self).__init__(variant_tensor)",
    "docstring": "Creates a . Args: filenames: A tensor or containing one or more filenames. record_bytes: A scalar representing the number of bytes in each record. header_bytes: (Optional.) A scalar representing the number of bytes to skip at the start of a file. footer_bytes: (Optional.) A scalar representing the number of bytes to ignore at the end of a file. buffer_size: (Optional.) A scalar representing the number of bytes to buffer when reading. compression_type: (Optional.) A scalar evaluating to one of (no compression), , or . num_parallel_reads: (Optional.) A scalar representing the number of files to read in parallel. If greater than one, the records of files read in parallel are outputted in an interleaved order. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value greater than one to parallelize the I/O. If , files will be read sequentially. name: (Optional.) A name for the tf.data operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filenames arg:record_bytes arg:header_bytes arg:footer_bytes arg:buffer_size arg:compression_type arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Assign Assign Assign Assign Assign FunctionDef name:creator_fn arg:filename arguments arg Return return:yes Call Assign Call Assign Call Call"
  },
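A short usage sketch via the public ``tf.data`` API; the file name and record layout are assumptions:

```python
import tensorflow as tf

# Hypothetical file of back-to-back 16-byte records, no header or footer
dataset = tf.data.FixedLengthRecordDataset(["records.bin"], record_bytes=16)
for record in dataset.take(2):
    print(record)  # tf.string scalar holding 16 raw bytes
```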
  {
    "library": "django",
    "name": "_should_handle",
    "source_code": "def _should_handle(self, path):\n    return path.startswith(self.base_url.path) and (not self.base_url.netloc)",
    "docstring": "Check if the path should be handled. Ignore the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal)",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\handlers.py",
    "ast_data": "FunctionDef name:_should_handle arg:self arg:path arguments arg arg Return return:yes BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "run_node",
    "source_code": "def run_node(self, n: torch.fx.Node) -> Result:\n    from torch._guards import detect_fake_mode\n    result = super().run_node(n)\n    rebind_unbacked(detect_fake_mode().shape_env, n, result)\n    return result",
    "docstring": "Run an FX node, propagating unbacked Symbol bindings to the new fake tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:run_node arg:self arg:n arguments arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "Mish",
    "source_code": "class Mish(Module):\n    __constants__ = ['inplace']\n    inplace: bool\n\n    def __init__(self, inplace: bool=False):\n        super().__init__()\n        self.inplace = inplace\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.mish(input, inplace=self.inplace)\n\n    def extra_repr(self) -> str:\n        inplace_str = 'inplace=True' if self.inplace else ''\n        return inplace_str",
    "docstring": "Applies the Mish function, element-wise. Mish: A Self Regularized Non-Monotonic Neural Activation Function. .. math:: \\text{Mish}(x) = x * \\text{Tanh}(\\text{Softplus}(x)) .. note:: See _ Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Mish.png Examples:: >>> m = nn.Mish() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:Mish Assign FunctionDef name:__init__ arg:self arg:inplace arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, wait_until_step):\n    self._wait_until_step = wait_until_step",
    "docstring": "Initializes a . Args: wait_until_step: an shows until which global step should we wait.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:wait_until_step arguments arg arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "clear",
    "source_code": "def clear(self):\n    if self._subclass_uses_cla:\n        self.cla()\n    else:\n        self.__clear()",
    "docstring": "Clear the Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_track_trackable",
    "source_code": "def _track_trackable(self, trackable, name, overwrite=False):\n    self._maybe_initialize_trackable()\n    if not isinstance(trackable, Trackable):\n        raise TypeError(f'Trackable._track_trackable() can only be used to track objects of type Trackable. Got type {type(trackable)}.')\n    if not getattr(self, '_manual_tracking', True):\n        return trackable\n    new_reference = TrackableReference(name=name, ref=trackable)\n    current_object = self._lookup_dependency(name)\n    if current_object is not None and current_object is not trackable:\n        if not overwrite:\n            raise ValueError(f\"Called Trackable._track_trackable() with name='{name}', but a Trackable with this name is already declared as a dependency. Names must be unique (or overwrite=True).\")\n        for index, (old_name, _) in enumerate(self._self_unconditional_checkpoint_dependencies):\n            if name == old_name:\n                self._self_unconditional_checkpoint_dependencies[index] = new_reference\n    elif current_object is None:\n        self._self_unconditional_checkpoint_dependencies.append(new_reference)\n        self._handle_deferred_dependencies(name=name, trackable=trackable)\n    self._self_unconditional_dependency_names[name] = trackable\n    return trackable",
    "docstring": "Declare a dependency on another object. Indicates that checkpoints for this object should include variables from . Variables in a checkpoint are mapped to s based on the names provided when the checkpoint was written. To avoid breaking existing checkpoints when modifying a class, neither variable names nor dependency names (the names passed to ) may change. Args: trackable: A which this object depends on. name: A local name for , used for loading checkpoints into the correct objects. overwrite: Boolean, whether silently replacing dependencies is OK. Used for __setattr__, where throwing an error on attribute reassignment would be inappropriate. Returns: , for convenience when declaring a dependency and assigning to a member variable in one statement. Raises: TypeError: If does not inherit from . ValueError: If another object is already tracked by this name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_track_trackable arg:self arg:trackable arg:name arg:overwrite arguments arg arg arg arg Call If Call Raise Call Call If Call Return return:yes Assign Call Assign Call If BoolOp Compare Compare If Raise Call For Call If Compare Assign If Compare Call Call Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "compile",
    "source_code": "def compile(strings, black='X', white='.', xor='o'):\n    size = (len(strings[0]), len(strings))\n    if size[0] % 8 or size[1] % 8:\n        raise ValueError(f'cursor string sizes must be divisible by 8 {size}')\n    for s in strings[1:]:\n        if len(s) != size[0]:\n            raise ValueError('Cursor strings are inconsistent lengths')\n    maskdata = []\n    filldata = []\n    maskitem = fillitem = 0\n    step = 8\n    for s in strings:\n        for c in s:\n            maskitem = maskitem << 1\n            fillitem = fillitem << 1\n            step = step - 1\n            if c == black:\n                maskitem = maskitem | 1\n                fillitem = fillitem | 1\n            elif c == white:\n                maskitem = maskitem | 1\n            elif c == xor:\n                fillitem = fillitem | 1\n            if not step:\n                maskdata.append(maskitem)\n                filldata.append(fillitem)\n                maskitem = fillitem = 0\n                step = 8\n    return (tuple(filldata), tuple(maskdata))",
    "docstring": "pygame.cursors.compile(strings, black, white, xor) -> data, mask compile cursor strings into cursor data This takes a set of strings with equal length and computes the binary data for that cursor. The string widths must be divisible by 8. The black and white arguments are single letter strings that tells which characters will represent black pixels, and which characters represent white pixels. All other characters are considered clear. Some systems allow you to set a special toggle color for the system color, this is also called the xor color. If the system does not support xor cursors, that color will simply be black. This returns a tuple containing the cursor data and cursor mask data. Both these arguments are used when setting a cursor with pygame.mouse.set_cursor().",
    "type": "function",
    "file_path": "pygame\\src_py\\cursors.py",
    "ast_data": "FunctionDef name:compile arg:strings arg:black arg:white arg:xor arguments arg arg arg arg Assign Call Call If BoolOp Raise Call For If Compare Call Raise Call Assign Assign Assign Assign For For Assign Assign Assign If Compare Assign Assign If Compare Assign If Compare Assign If Call Call Assign Assign Return return:yes Call Call"
  },
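A small sketch compiling an 8x8 cursor (both dimensions divisible by 8, as required):

```python
import pygame

strings = (
    "XXXXXXXX",
    "X......X",
    "X......X",
    "X......X",
    "X......X",
    "X......X",
    "X......X",
    "XXXXXXXX",
)
data, mask = pygame.cursors.compile(strings, black="X", white=".")
# pygame.mouse.set_cursor((8, 8), (0, 0), data, mask)  # after display init
```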
  {
    "library": "scipy",
    "name": "cdf",
    "source_code": "def cdf(self, x, loc=None, shape=1, df=1, allow_singular=False, *, maxpts=None, lower_limit=None, random_state=None):\n    dim, loc, shape, df = self._process_parameters(loc, shape, df)\n    shape = _PSD(shape, allow_singular=allow_singular)._M\n    return self._cdf(x, loc, shape, df, dim, maxpts, lower_limit, random_state)",
    "docstring": "Multivariate t-distribution cumulative distribution function. Parameters ---------- x : array_like Points at which to evaluate the cumulative distribution function. %(_mvt_doc_default_callparams)s maxpts : int, optional Maximum number of points to use for integration. The default is 1000 times the number of dimensions. lower_limit : array_like, optional Lower limit of integration of the cumulative distribution function. Default is negative infinity. Must be broadcastable with . %(_doc_random_state)s Returns ------- cdf : ndarray or scalar Cumulative distribution function evaluated at . Examples -------- >>> from scipy.stats import multivariate_t >>> x = [0.4, 5] >>> loc = [0, 1] >>> shape = [[1, 0.1], [0.1, 1]] >>> df = 7 >>> multivariate_t.cdf(x, loc, shape, df) 0.64798491",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:cdf arg:self arg:x arg:loc arg:shape arg:df arg:allow_singular arguments arg arg arg arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "components",
    "source_code": "@property\ndef components(self) -> DataFrame:\n    from pandas import DataFrame\n    columns = ['days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds']\n    hasnans = self._hasna\n    if hasnans:\n\n        def f(x):\n            if isna(x):\n                return [np.nan] * len(columns)\n            return x.components\n    else:\n\n        def f(x):\n            return x.components\n    result = DataFrame([f(x) for x in self], columns=columns)\n    if not hasnans:\n        result = result.astype('int64')\n    return result",
    "docstring": "Return a DataFrame of the individual resolution components of the Timedeltas. The components (days, hours, minutes seconds, milliseconds, microseconds, nanoseconds) are returned as columns in a DataFrame. Returns ------- DataFrame See Also -------- TimedeltaIndex.total_seconds : Return total duration expressed in seconds. Timedelta.components : Return a components namedtuple-like of a single timedelta. Examples -------- >>> tdelta_idx = pd.to_timedelta([\"1 day 3 min 2 us 42 ns\"]) >>> tdelta_idx TimedeltaIndex(['1 days 00:03:00.000002042'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.components days hours minutes seconds milliseconds microseconds nanoseconds 0 1 0 3 0 0 2 42",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py",
    "ast_data": "FunctionDef name:components arg:self arguments arg Assign Assign If FunctionDef name:f arg:x arguments arg If Call Return return:yes Call Return return:yes FunctionDef name:f arg:x arguments arg Return return:yes Assign Call Call If Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "toggleTracebacks",
    "source_code": "@cherrypy.expose\ndef toggleTracebacks(self):\n    tracebacks = cherrypy.request.show_tracebacks\n    cherrypy.config.update({'request.show_tracebacks': not tracebacks})\n    raise cherrypy.HTTPRedirect('/')",
    "docstring": "Switch tracebacks setting on `` URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut10_http_errors.py",
    "ast_data": "FunctionDef name:toggleTracebacks arg:self arguments arg Assign Call Raise Call"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    n = self.size\n    get_point = self._point_getter\n    if n == 1:\n        return get_point(0)\n    return tuple((get_point(i) for i in range(n)))",
    "docstring": "Return a tuple version of this coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Assign Assign If Compare Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "flush",
    "source_code": "def flush(self):\n    self.event_writer.flush()",
    "docstring": "Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py",
    "ast_data": "FunctionDef name:flush arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "diff",
    "source_code": "def diff(self, other):\n    r = set(self.global_state.keys()).difference(set(other.global_state.keys()))\n    if len(r) == 0:\n        return None\n    return r",
    "docstring": "Produces a delta against another GlobalContextCheckpointState. Returns None if no delta is found, otherwise, return a set() of mismatched global key names.",
    "type": "method",
    "file_path": "pytorch\\torch\\_guards.py",
    "ast_data": "FunctionDef name:diff arg:self arg:other arguments arg arg Assign Call Call Call Call Call If Compare Call Return return:no Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mesh",
    "source_code": "def mesh(tag, vertices, colors, faces, config_dict, display_name=None, description=None):\n    from tensorboard.plugins.mesh import metadata\n    from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData\n    json_config = _get_json_config(config_dict)\n    summaries = []\n    tensors = [(vertices, MeshPluginData.VERTEX), (faces, MeshPluginData.FACE), (colors, MeshPluginData.COLOR)]\n    tensors = [tensor for tensor in tensors if tensor[0] is not None]\n    components = metadata.get_components_bitmask([content_type for tensor, content_type in tensors])\n    for tensor, content_type in tensors:\n        summaries.append(_get_tensor_summary(tag, display_name, description, tensor, content_type, components, json_config))\n    return Summary(value=summaries)",
    "docstring": "Output a merged protocol buffer with a mesh/point cloud. Args: tag: A name for this summary operation. vertices: Tensor of shape representing the 3D coordinates of vertices. faces: Tensor of shape containing indices of vertices within each triangle. colors: Tensor of shape containing colors for each vertex. display_name: If set, will be used as the display name in TensorBoard. Defaults to . description: A longform readable description of the summary data. Markdown is supported. config_dict: Dictionary with ThreeJS classes names and configuration. Returns: Merged summary for mesh/point cloud representation.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py",
    "ast_data": "FunctionDef name:mesh arg:tag arg:vertices arg:colors arg:faces arg:config_dict arg:display_name arg:description arguments arg arg arg arg arg arg arg Assign Call Assign Assign Assign Compare Assign Call For Call Call Return return:yes Call"
  },
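The usual entry point to this summary is ``SummaryWriter.add_mesh``; a minimal sketch with toy geometry:

```python
import torch
from torch.utils.tensorboard import SummaryWriter

vertices = torch.rand(1, 4, 3)                      # (batch, num_vertices, xyz)
colors = (torch.rand(1, 4, 3) * 255).to(torch.int)  # per-vertex RGB
faces = torch.tensor([[[0, 1, 2], [0, 2, 3]]])      # triangle vertex indices

writer = SummaryWriter()
writer.add_mesh("toy_mesh", vertices=vertices, colors=colors, faces=faces)
writer.close()
```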
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}",
    "docstring": "Return the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the optimizer.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Return return:yes Call Compare"
  },
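A short round-trip sketch showing that the optimizer itself is excluded from the saved state:

```python
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)

state = sched.state_dict()          # plain dict; no 'optimizer' key
restored = torch.optim.lr_scheduler.StepLR(opt, step_size=1)
restored.load_state_dict(state)     # step_size, last_epoch, etc. are restored
```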
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=('csr', 'csc'), copy=self.copy, dtype=FLOAT_DTYPES, force_writeable=True, reset=False, ensure_all_finite='allow-nan')\n    if sparse.issparse(X):\n        if self.with_scaling:\n            inplace_column_scale(X, 1.0 / self.scale_)\n    else:\n        if self.with_centering:\n            X -= self.center_\n        if self.with_scaling:\n            X /= self.scale_\n    return X",
    "docstring": "Center and scale the data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the specified axis. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Assign Call If Call If Call If If Return return:yes"
  },
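A usage sketch via the public estimator (``RobustScaler``), whose ``transform`` is shown above:

```python
import numpy as np
from sklearn.preprocessing import RobustScaler

X = np.array([[1.0], [2.0], [3.0], [100.0]])  # toy data with an outlier
scaler = RobustScaler().fit(X)                # centers on the median, scales by IQR
print(scaler.center_, scaler.scale_)
print(scaler.transform(X))
```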
  {
    "library": "tensorflow",
    "name": "get_temp_export_dir",
    "source_code": "def get_temp_export_dir(timestamped_export_dir):\n    dirname, basename = os.path.split(timestamped_export_dir)\n    if isinstance(basename, bytes):\n        str_name = basename.decode('utf-8')\n    else:\n        str_name = str(basename)\n    temp_export_dir = file_io.join(compat.as_bytes(dirname), compat.as_bytes('temp-{}'.format(str_name)))\n    return temp_export_dir",
    "docstring": "Builds a directory name based on the argument but starting with 'temp-'. This relies on the fact that TensorFlow Serving ignores subdirectories of the base directory that can't be parsed as integers. Args: timestamped_export_dir: the name of the eventual export directory, e.g. /foo/bar/ Returns: A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py",
    "ast_data": "FunctionDef name:get_temp_export_dir arg:timestamped_export_dir arguments arg Assign Call If Call Assign Call Assign Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_redirect_uris",
    "source_code": "def validate_redirect_uris(self):\n    uris = self.get('redirect_uris')\n    if uris:\n        for uri in uris:\n            self._validate_uri('redirect_uris', uri)",
    "docstring": "Array of redirection URI strings for use in redirect-based flows such as the authorization code and implicit flows. As required by Section 2 of OAuth 2.0 [RFC6749], clients using flows with redirection MUST register their redirection URI values. Authorization servers that support dynamic registration for redirect-based flows MUST implement support for this metadata value.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_redirect_uris arg:self arguments arg Assign Call If For Call"
  },
  {
    "library": "kornia",
    "name": "get_transformation_matrix",
    "source_code": "def get_transformation_matrix(self, input: Tensor, params: Optional[Dict[str, Tensor]]=None, flags: Optional[Dict[str, Any]]=None) -> Tensor:\n    flags = self.flags if flags is None else flags\n    if params is not None:\n        transform = self.generate_transformation_matrix(input, params, flags)\n    elif self.transform_matrix is None:\n        params = self.forward_parameters(input.shape)\n        transform = self.generate_transformation_matrix(input, params, flags)\n    else:\n        transform = self.transform_matrix\n    return as_tensor(transform, device=input.device, dtype=input.dtype)",
    "docstring": "Obtain transformation matrices. Return the current transformation matrix if existed. Generate a new one, otherwise.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py",
    "ast_data": "FunctionDef name:get_transformation_matrix arg:self arg:input arg:params arg:flags arguments arg arg arg arg Assign Compare If Compare Assign Call If Compare Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_frommethod",
    "source_code": "class _frommethod:\n\n    def __init__(self, methodname, reversed=False):\n        self.__name__ = methodname\n        self.__qualname__ = methodname\n        self.__doc__ = self.getdoc()\n        self.reversed = reversed\n\n    def getdoc(self):\n        meth = getattr(MaskedArray, self.__name__, None) or getattr(np, self.__name__, None)\n        signature = self.__name__ + get_object_signature(meth)\n        if meth is not None:\n            doc = f'    {signature}\\n{getattr(meth, '__doc__', None)}'\n            return doc\n\n    def __call__(self, a, *args, **params):\n        if self.reversed:\n            args = list(args)\n            a, args[0] = (args[0], a)\n        marr = asanyarray(a)\n        method_name = self.__name__\n        method = getattr(type(marr), method_name, None)\n        if method is None:\n            method = getattr(np, method_name)\n        return method(marr, *args, **params)",
    "docstring": "Define functions from existing MaskedArray methods. Parameters ---------- methodname : str Name of the method to transform.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "ClassDef name:_frommethod FunctionDef name:__init__ arg:self arg:methodname arg:reversed arguments arg arg arg Assign Assign Assign Call Assign FunctionDef name:getdoc arg:self arguments arg Assign BoolOp Call Call Assign Call If Compare Assign Call Return return:yes FunctionDef name:__call__ arg:self arg:a arguments arg arg arg arg If Assign Call Assign Assign Call Assign Assign Call Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "parse_data",
    "source_code": "def parse_data(self, data_str):\n    return None",
    "docstring": "Parse a value of this type.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_data arg:self arg:data_str arguments arg arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "OperatorSupportBase",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass OperatorSupportBase(abc.ABC):\n\n    @abc.abstractmethod\n    def is_node_supported(self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n        raise NotImplementedError",
    "docstring": "Interface for determining if a fx.Node is supported by a backend",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py",
    "ast_data": "ClassDef name:OperatorSupportBase FunctionDef name:is_node_supported arg:self arg:submodules arg:node arguments arg arg arg Raise Call"
  },
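A minimal sketch of a backend support check; ``AddOnlySupport`` is hypothetical:

```python
import typing as t

import torch
import torch.fx
from torch.fx.passes.operator_support import OperatorSupportBase

class AddOnlySupport(OperatorSupportBase):
    """Hypothetical backend that only supports torch.add nodes."""

    def is_node_supported(
        self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node
    ) -> bool:
        return node.op == "call_function" and node.target is torch.add
```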
  {
    "library": "kornia",
    "name": "_perform_padding",
    "source_code": "def _perform_padding(image: Tensor) -> tuple[Tensor, int, int]:\n    H, W = image.shape[-2:]\n    h_pad: int = math.ceil(H / 16) * 16 - H\n    w_pad: int = math.ceil(W / 16) * 16 - W\n    image_padded: Tensor = F.pad(image, (0, w_pad, 0, h_pad), 'replicate')\n    return (image_padded, h_pad, w_pad)",
    "docstring": "Pad a given image to be dividable by 16. Args: image: Image of the shape :math:. Returns: image_padded: Padded image of the shape :math:. h_pad: Padded pixels along the horizontal axis. w_pad: Padded pixels along the vertical axis.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_perform_padding arg:image arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_CollectionWithSizes",
    "source_code": "class _CollectionWithSizes(Collection):\n    _factor = 1.0\n\n    def get_sizes(self):\n        return self._sizes\n\n    def set_sizes(self, sizes, dpi=72.0):\n        if sizes is None:\n            self._sizes = np.array([])\n            self._transforms = np.empty((0, 3, 3))\n        else:\n            self._sizes = np.asarray(sizes)\n            self._transforms = np.zeros((len(self._sizes), 3, 3))\n            scale = np.sqrt(self._sizes) * dpi / 72.0 * self._factor\n            self._transforms[:, 0, 0] = scale\n            self._transforms[:, 1, 1] = scale\n            self._transforms[:, 2, 2] = 1.0\n        self.stale = True\n\n    @artist.allow_rasterization\n    def draw(self, renderer):\n        self.set_sizes(self._sizes, self.get_figure(root=True).dpi)\n        super().draw(renderer)",
    "docstring": "Base class for collections that have an array of sizes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "ClassDef name:_CollectionWithSizes Assign FunctionDef name:get_sizes arg:self arguments arg Return return:yes FunctionDef name:set_sizes arg:self arg:sizes arg:dpi arguments arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Assign Assign Assign FunctionDef name:draw arg:self arg:renderer arguments arg arg Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "setbufsize",
    "source_code": "@set_module('numpy')\ndef setbufsize(size):\n    old = _get_extobj_dict()['bufsize']\n    extobj = _make_extobj(bufsize=size)\n    _extobj_contextvar.set(extobj)\n    return old",
    "docstring": "Set the size of the buffer used in ufuncs. .. versionchanged:: 2.0 The scope of setting the buffer is tied to the context. Exiting a `numpy.errstate` context manager the bufsize is restored: >>> import numpy as np >>> with np.errstate(): ... np.setbufsize(4096) ... print(np.getbufsize()) ... 8192 4096 >>> np.getbufsize() 8192",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_ufunc_config.py",
    "ast_data": "FunctionDef name:setbufsize arg:size arguments arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_parse_dtensor_env_var_from_cluster_resolver",
    "source_code": "def _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver):\n    result = {}\n    cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_resolver.cluster_spec())\n    dtensor_jobs = []\n    if 'chief' in cluster_spec.jobs:\n        dtensor_jobs.extend(cluster_spec.job_tasks('chief'))\n    if 'worker' in cluster_spec.jobs:\n        dtensor_jobs.extend(cluster_spec.job_tasks('worker'))\n    if None in dtensor_jobs:\n        raise ValueError(f'Unexpected dtensor job address from cluster spec: {cluster_spec}')\n    result['DTENSOR_JOBS'] = ','.join(dtensor_jobs)\n    result['DTENSOR_NUM_CLIENTS'] = str(len(dtensor_jobs))\n    if cluster_resolver.task_type == 'chief':\n        dtensor_client_id = 0\n    elif cluster_resolver.task_type == 'worker':\n        dtensor_client_id = cluster_resolver.task_id\n        if 'chief' in cluster_spec.jobs:\n            dtensor_client_id += 1\n    result['DTENSOR_CLIENT_ID'] = str(dtensor_client_id)\n    result['DTENSOR_JOB_NAME'] = 'worker'\n    return result",
    "docstring": "Parse the env vars for Dtensor based on the cluster resolver. In the multi-client setting, each of the DTensor jobs need to aware of each other, and the interface to setup those values are via the envvars. The value used by dtensor are different from the existing . This function will parse the value from cluster resolver, and populate the corresponding value for DTensor jobs in the . Args: cluster_resolver: A instance. Returns: A dict of {Str:Str} which contains all the env vars needed by DTensor jobs. The value is for verification purpose. Raises: The value parsed from existing cluster spec is not valid.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\experimental\\multi_worker_mirrored_strategy.py",
    "ast_data": "FunctionDef name:_parse_dtensor_env_var_from_cluster_resolver arg:cluster_resolver arguments arg Assign Assign Call Call Assign If Compare Call Call If Compare Call Call If Compare Raise Call Assign Call Assign Call Call If Compare Assign If Compare Assign If Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "create_token_response",
    "source_code": "def create_token_response(self):\n    token = self.generate_token(scope=self.request.payload.scope, user=self.request.user, include_refresh_token=False)\n    log.debug('Issue token %r to %r', token, self.request.client)\n    self.save_token(token)\n    return (200, token, self.TOKEN_RESPONSE_HEADER)",
    "docstring": "If valid and authorized, the authorization server issues an access token.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\jwt_bearer.py",
    "ast_data": "FunctionDef name:create_token_response arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_grad_fn",
    "source_code": "def _grad_fn(ys, xs, args, func_graph):\n    grad_ys = args[3:]\n    grad_outs = gradients_util._GradientsHelper(ys, xs, grad_ys=grad_ys, src_graph=func_graph, unconnected_gradients='zero')\n    assert all((g is not None for g in grad_outs))\n    counter = args[0]\n    maximum_iterations = args[1]\n    total_iters = args[2]\n    return [counter + 1, maximum_iterations, total_iters] + grad_outs",
    "docstring": "Computes the gradient of in the current graph. This function builds the gradient graph of the corresponding forward-pass by differentiating 's outputs w.r.t. its inputs. Args: ys: A or list of tensors to be differentiated. xs: A or list of tensors to be used for differentiation. args: The input arguments. args[0] - Loop counter args[1] - Total number of iterations. args[2] - maximum_iterations. args[3:] - Incoming gradients for . func_graph: function.FuncGraph. The corresponding forward-pass function. Returns: The output gradient Tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_grad_fn arg:ys arg:xs arg:args arg:func_graph arguments arg arg arg arg Assign Assign Call Call Compare Assign Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_add_fixed_qparams_to_dtype_configs",
    "source_code": "def _add_fixed_qparams_to_dtype_configs(dtype_configs: list[DTypeConfig], constraints: DTypeWithConstraints) -> list[DTypeConfig]:\n    new_dtype_configs = []\n    for dtype_config in dtype_configs:\n        dc = copy.deepcopy(dtype_config)\n        for orig_constraints in [dc.input_dtype_with_constraints, dc.output_dtype_with_constraints]:\n            if orig_constraints.dtype != constraints.dtype:\n                continue\n            if orig_constraints.scale_min_lower_bound is not None:\n                raise ValueError(f'scale_min_lower_bound is invalid for fixed qparams ops: {dtype_config}')\n            if orig_constraints.scale_max_upper_bound is not None:\n                raise ValueError(f'scale_max_upper_bound is invalid for fixed qparams ops: {dtype_config}')\n            orig_constraints.quant_min_lower_bound = constraints.quant_min_lower_bound\n            orig_constraints.quant_max_upper_bound = constraints.quant_max_upper_bound\n            orig_constraints.scale_exact_match = constraints.scale_exact_match\n            orig_constraints.zero_point_exact_match = constraints.zero_point_exact_match\n        new_dtype_configs.append(dc)\n    return new_dtype_configs",
    "docstring": "Return a copy of the list of DTypeConfigs where activations are subject to the specified constraints required for fixed qparams ops. If the data type doesn't match the one in the constraints, simply leave the corresponding DTypeConfig unchanged. If or is specified in the activations, throw an exception since these settings are incompatible with fixed qparams ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\_common_operator_config_utils.py",
    "ast_data": "FunctionDef name:_add_fixed_qparams_to_dtype_configs arg:dtype_configs arg:constraints arguments arg arg Assign For Assign Call For If Compare If Compare Raise Call If Compare Raise Call Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sparray",
    "source_code": "class sparray:\n\n    @classmethod\n    def __class_getitem__(cls, arg, /):\n        from types import GenericAlias\n        return GenericAlias(cls, arg)",
    "docstring": "A namespace class to separate sparray from spmatrix",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "ClassDef name:sparray FunctionDef name:__class_getitem__ arguments arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "interp",
    "source_code": "def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:\n    i = torch.clip(torch.searchsorted(xp, x, right=True), 1, len(xp) - 1)\n    return (fp[i - 1] * (xp[i] - x) + fp[i] * (x - xp[i - 1])) / (xp[i] - xp[i - 1])",
    "docstring": "One-dimensional linear interpolation for monotonically increasing sample points. Returns the one-dimensional piecewise linear interpolant to a function with given discrete data points :math:, evaluated at :math:. This is confirmed to be a correct implementation. See Args: x: the :math:-coordinates at which to evaluate the interpolated values. xp: the :math:-coordinates of the data points, must be increasing. fp: the :math:-coordinates of the data points, same length as . Returns: the interpolated values, same size as .",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\histogram_matching.py",
    "ast_data": "FunctionDef name:interp arg:x arg:xp arg:fp arguments arg arg arg Assign Call Call Call Return return:yes"
  },
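A quick check of the helper against the semantics of ``numpy.interp``, assuming the module path shown above:

```python
import torch
from kornia.contrib.histogram_matching import interp

xp = torch.tensor([1.0, 2.0, 3.0])   # increasing sample points
fp = torch.tensor([3.0, 2.0, 0.0])
x = torch.tensor([1.5, 2.5])
print(interp(x, xp, fp))             # tensor([2.5000, 1.0000])
```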
  {
    "library": "tensorflow",
    "name": "with_function_scope",
    "source_code": "def with_function_scope(thunk, scope_name, options):\n    with FunctionScope('lambda_', scope_name, options) as scope:\n        return thunk(scope)",
    "docstring": "Inline version of the FunctionScope context manager.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\function_wrappers.py",
    "ast_data": "FunctionDef name:with_function_scope arg:thunk arg:scope_name arg:options arguments arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "min",
    "source_code": "@_period_dispatch\ndef min(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs):\n    nv.validate_min((), kwargs)\n    nv.validate_minmax_axis(axis, self.ndim)\n    result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)\n    return self._wrap_reduction_result(axis, result)",
    "docstring": "Return the minimum value of the Array or minimum along an axis. See Also -------- numpy.ndarray.min Index.min : Return the minimum value in an Index. Series.min : Return the minimum value in a Series.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:min arg:self arguments arg arg arg arg Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "Rgb255ToNormals",
    "source_code": "class Rgb255ToNormals(Module):\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb255_to_normals(image)",
    "docstring": "Convert an image from RGB [0, 255] to surface normals for visualization purposes. Returns: surface normals version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> normals = Rgb255ToNormals() >>> output = normals(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:Rgb255ToNormals FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "pad_or_backfill_inplace",
    "source_code": "def pad_or_backfill_inplace(values: np.ndarray, method: Literal['pad', 'backfill']='pad', axis: AxisInt=0, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None) -> None:\n    transf = (lambda x: x) if axis == 0 else lambda x: x.T\n    if values.ndim == 1:\n        if axis != 0:\n            raise AssertionError('cannot interpolate on a ndim == 1 with axis != 0')\n        values = values.reshape(tuple((1,) + values.shape))\n    method = clean_fill_method(method)\n    tvalues = transf(values)\n    func = get_fill_func(method, ndim=2)\n    func(tvalues, limit=limit, limit_area=limit_area)",
    "docstring": "Perform an actual interpolation of values, values will be make 2-d if needed fills inplace, returns the result. Parameters ---------- values: np.ndarray Input array. method: str, default \"pad\" Interpolation method. Could be \"bfill\" or \"pad\" axis: 0 or 1 Interpolation axis limit: int, optional Index limit on interpolation. limit_area: str, optional Limit area for interpolation. Can be \"inside\" or \"outside\" Notes ----- Modifies values in-place.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:pad_or_backfill_inplace arg:values arg:method arg:axis arg:limit arg:limit_area arguments arg arg arg arg arg Assign Compare arguments arg arguments arg If Compare If Compare Raise Call Assign Call Call Assign Call Assign Call Assign Call Call"
  },
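A quick, hedged usage sketch for the entry above: `pad_or_backfill_inplace` is internal, but the public `Series.ffill`/`Series.bfill` methods exercise the same pad/backfill machinery, so its observable behavior can be checked through them.

```python
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.nan, 4.0])
# "pad" fills forward; limit caps the number of consecutive fills.
print(s.ffill(limit=1).tolist())  # [1.0, 1.0, nan, 4.0]
# "backfill" fills backward from the next valid value.
print(s.bfill().tolist())         # [1.0, 4.0, 4.0, 4.0]
```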
  {
    "library": "scipy",
    "name": "OptimizeWarning",
    "source_code": "class OptimizeWarning(UserWarning):\n    pass",
    "docstring": "General warning for :mod:.",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "ClassDef name:OptimizeWarning"
  },
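Because `OptimizeWarning` is a plain `UserWarning` subclass, a common pattern (sketched below) is to escalate it to an error during development so solver issues are not silently swallowed.

```python
import warnings
from scipy.optimize import OptimizeWarning

# Turn any OptimizeWarning into an exception instead of a console message.
warnings.simplefilter("error", OptimizeWarning)
```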
  {
    "library": "tensorflow",
    "name": "_is_subscribed_identity",
    "source_code": "def _is_subscribed_identity(tensor):\n    if tensor.op.type != 'Identity':\n        return False\n    match = re.match('(?P<prefix_name>^.*?)/subscription/Identity[^/]+', tensor.name)\n    if match is None or len(match.groups()) != 1:\n        return False\n    prefix_name = match.group('prefix_name')\n    assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(tensor.op.name)\n    source_tensor = tensor.op.inputs[0]\n    if prefix_name != source_tensor.op.name:\n        return False\n    return True",
    "docstring": "Checks if the given tensor is an identity op returned by . Args: tensor: A to check. Returns: True if the given tensor matches the criteria for subscription identities: its op type is , its name matches the name of its input and conforms to the convention for subscribed nodes. False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py",
    "ast_data": "FunctionDef name:_is_subscribed_identity arg:tensor arguments arg If Compare Return return:yes Assign Call If BoolOp Compare Compare Call Call Return return:yes Assign Call Compare Call Call Assign If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, save_steps=None, save_secs=None, output_dir='', show_dataflow=True, show_memory=False):\n    self._output_file = os.path.join(output_dir, 'timeline-{}.json')\n    self._file_writer = SummaryWriterCache.get(output_dir)\n    self._show_dataflow = show_dataflow\n    self._show_memory = show_memory\n    self._timer = SecondOrStepTimer(every_secs=save_secs, every_steps=save_steps)",
    "docstring": "Initializes a hook that takes periodic profiling snapshots. argument of is used to collect metadata about execution. This hook sets the metadata and dumps it in Chrome Trace format. Args: save_steps: , save profile traces every N steps. Exactly one of and should be set. save_secs: or , save profile traces every N seconds. output_dir: , the directory to save the profile traces to. Defaults to the current directory. show_dataflow: , if True, add flow events to the trace connecting producers and consumers of tensors. show_memory: , if True, add object snapshot events to the trace showing the sizes and lifetimes of tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:save_steps arg:save_secs arg:output_dir arg:show_dataflow arg:show_memory arguments arg arg arg arg arg arg Assign Call Assign Call Assign Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_try_parse_port",
    "source_code": "def _try_parse_port(port_str: str) -> Optional[int]:\n    if port_str and re.match('^[0-9]{1,5}$', port_str):\n        return int(port_str)\n    return None",
    "docstring": "Try to extract the port number from ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\utils.py",
    "ast_data": "FunctionDef name:_try_parse_port arg:port_str arguments arg If BoolOp Call Return return:yes Call Return return:no"
  },
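A standalone re-check of the parsing rule above: 1-5 digit strings parse, anything else (empty, signed, non-numeric) yields None. Note the regex does not range-check, so "99999" parses even though it exceeds 65535.

```python
import re
from typing import Optional

def try_parse_port(port_str: str) -> Optional[int]:
    # Mirrors the entry above: accept only 1-5 ASCII digits.
    if port_str and re.match('^[0-9]{1,5}$', port_str):
        return int(port_str)
    return None

assert try_parse_port("29500") == 29500
assert try_parse_port("") is None
assert try_parse_port("-1") is None
assert try_parse_port("99999") == 99999  # syntactically valid, semantically dubious
```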
  {
    "library": "tensorflow",
    "name": "get_signature_def",
    "source_code": "def get_signature_def(meta_graph, signature_key):\n    signature_def_map = meta_graph.signature_def\n    signature_def_keys = set(signature_def_map.keys())\n    logging.info('The given SavedModel MetaGraphDef contains SignatureDefs with the following keys: %s', signature_def_keys)\n    if signature_key not in signature_def_keys:\n        raise ValueError(\"No '{}' in the SavedModel's SignatureDefs. Possible values are '{}'.\".format(signature_key, ','.join(signature_def_keys)))\n    return signature_def_map[signature_key]",
    "docstring": "Get the signature def from meta_graph with given signature_key. Args: meta_graph: meta_graph_def. signature_key: signature_def in the meta_graph_def. Returns: The signature_def used for tflite conversion. Raises: ValueError: Given signature_key is not valid for this meta_graph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert_saved_model.py",
    "ast_data": "FunctionDef name:get_signature_def arg:meta_graph arg:signature_key arguments arg arg Assign Assign Call Call Call If Compare Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_deregister_flat_param",
    "source_code": "def _deregister_flat_param(state: _FSDPState, module: nn.Module) -> None:\n    if _has_fsdp_params(state, module):\n        cast(nn.Module, module.module)._parameters.pop(FLAT_PARAM, None)",
    "docstring": "De-registers the flattened parameter from the wrapped module, hiding it from `` methods.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_unshard_param_utils.py",
    "ast_data": "FunctionDef name:_deregister_flat_param arg:state arg:module arguments arg arg If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, node_pattern: NodePattern, modules: dict[str, torch.nn.Module], root_node_getter: Optional[Callable]=None, is_custom_module=False, is_standalone_module=False):\n    self.node_pattern = node_pattern\n    self.modules = modules\n    if root_node_getter is None:\n        root_node_getter = _default_root_node_getter\n    self.root_node = root_node_getter(node_pattern)\n    self.is_custom_module_ = is_custom_module\n    self.is_standalone_module_ = is_standalone_module\n    self.num_tensor_args = 0\n    if isinstance(self.root_node, Node):\n        cache_for_no_tensor_check: dict[Node, bool] = {}\n        for arg_idx in range(len(self.root_node.args)):\n            arg = self.root_node.args[arg_idx]\n            if isinstance(arg, Node) and (not all_node_args_have_no_tensors(arg, self.modules, cache_for_no_tensor_check)):\n                self.num_tensor_args += 1",
    "docstring": "Records pattern information in __init__, which will be used in convert",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\quantize_handler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:node_pattern arg:modules arg:root_node_getter arg:is_custom_module arg:is_standalone_module arguments arg arg arg arg arg arg Assign Assign If Compare Assign Assign Call Assign Assign Assign If Call For Call Call Assign If BoolOp Call Call"
  },
  {
    "library": "matplotlib",
    "name": "register_axis",
    "source_code": "def register_axis(self, axis):\n    self.axis = axis\n    self.stale = True",
    "docstring": "Register an axis. An axis should be registered with its corresponding spine from the Axes instance. This allows the spine to clear any axis properties when needed.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\spines.py",
    "ast_data": "FunctionDef name:register_axis arg:self arg:axis arguments arg arg Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_set_new_active_manager",
    "source_code": "@classmethod\ndef _set_new_active_manager(cls, manager):\n    if not hasattr(manager, '_cidgcf'):\n        manager._cidgcf = manager.canvas.mpl_connect('button_press_event', lambda event: cls.set_active(manager))\n    fig = manager.canvas.figure\n    fig._number = manager.num\n    label = fig.get_label()\n    if label:\n        manager.set_window_title(label)\n    cls.set_active(manager)",
    "docstring": "Adopt *manager* into pyplot and make it the active manager.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py",
    "ast_data": "FunctionDef name:_set_new_active_manager arg:cls arg:manager arguments arg arg If Call Assign Call arguments arg Call Assign Assign Assign Call If Call Call"
  },
  {
    "library": "pandas",
    "name": "notnull",
    "source_code": "@doc(NDFrame.notna, klass=_shared_doc_kwargs['klass'])\ndef notnull(self) -> DataFrame:\n    return ~self.isna()",
    "docstring": "DataFrame.notnull is an alias for DataFrame.notna.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:notnull arg:self arguments arg Return return:yes Call Call"
  },
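A minimal check of the aliasing described above: `notnull` and `notna` return identical frames, both the inverse of `isna`.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan]})
assert df.notnull().equals(df.notna())
assert df.notnull().equals(~df.isna())
```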
  {
    "library": "sphinx",
    "name": "get_line",
    "source_code": "def get_line(self, lineno: int) -> str:\n    return self.buffers[lineno - 1]",
    "docstring": "Returns specified line.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:get_line arg:self arg:lineno arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_accessed",
    "source_code": "def variable_accessed(variable):\n    if hasattr(ops.get_default_graph(), 'watch_variable'):\n        ops.get_default_graph().watch_variable(variable)\n    if variable.trainable:\n        tape.variable_accessed(variable)",
    "docstring": "Records that was accessed for the tape and FuncGraph.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:variable_accessed arg:variable arguments arg If Call Call Call Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_assert_valid_qconfig",
    "source_code": "def _assert_valid_qconfig(qconfig: Optional[QConfig], mod: torch.nn.Module) -> None:\n    if qconfig is None:\n        return\n    is_conv_transpose_mod = isinstance(mod, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d))\n    if is_conv_transpose_mod:\n        if qconfig.weight is None:\n            return\n        example_observer = qconfig.weight()\n        is_per_channel = isinstance(example_observer, (torch.ao.quantization.PerChannelMinMaxObserver, torch.ao.quantization.MovingAveragePerChannelMinMaxObserver))\n        assert not is_per_channel, 'Per channel weight observer is not supported yet for ConvTranspose{n}d.'",
    "docstring": "Verifies that this is valid.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig.py",
    "ast_data": "FunctionDef name:_assert_valid_qconfig arg:qconfig arg:mod arguments arg arg If Compare Return return:no Assign Call If If Compare Return return:no Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "primitive_column_to_ndarray",
    "source_code": "def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]:\n    buffers = col.get_buffers()\n    data_buff, data_dtype = buffers['data']\n    data = buffer_to_ndarray(data_buff, data_dtype, offset=col.offset, length=col.size())\n    data = set_nulls(data, col, buffers['validity'])\n    return (data, buffers)",
    "docstring": "Convert a column holding one of the primitive dtypes to a NumPy array. A primitive type is one of: int, uint, float, bool. Parameters ---------- col : Column Returns ------- tuple Tuple of np.ndarray holding the data and the memory owner object that keeps the memory alive.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py",
    "ast_data": "FunctionDef name:primitive_column_to_ndarray arg:col arguments arg Assign Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "visible",
    "source_code": "def visible(x, axis=None):\n    return np.array(x).dtype.kind != 'f' or np.isfinite(x).all(axis)",
    "docstring": "Detect \"invisible\" colors to set alpha appropriately.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_marks\\base.py",
    "ast_data": "FunctionDef name:visible arg:x arg:axis arguments arg arg Return return:yes BoolOp Compare Call Call Call"
  },
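A standalone illustration of the rule above: non-float arrays are always "visible"; float arrays are visible only where finite (seaborn uses NaN to encode a missing/invisible color).

```python
import numpy as np

def visible(x, axis=None):
    # Same logic as the entry above, reproduced for a self-contained demo.
    return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)

assert visible(["red", "blue"])    # string dtype -> always visible
assert visible([1.0, 2.0])
assert not visible([1.0, np.nan])  # NaN makes the color invisible
```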
  {
    "library": "pytorch",
    "name": "checkpoint_id",
    "source_code": "@property\ndef checkpoint_id(self) -> Union[str, os.PathLike]:\n    return self.path",
    "docstring": "return the checkpoint_id that will be used to load the checkpoint.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py",
    "ast_data": "FunctionDef name:checkpoint_id arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, device=''):\n    global _RESOURCE_TRACKER_STACK\n    for resource_tracker in _RESOURCE_TRACKER_STACK:\n        resource_tracker.add_resource(self)\n    super().__init__(device=device)",
    "docstring": "Initialize the . Args: device: A string indicating a required placement for this resource, e.g. \"CPU\" if this resource must be created on a CPU device. A blank device allows the user to place resource creation, so generally this should be blank unless the resource only makes sense on one device.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:device arguments arg arg For Call Call Call"
  },
  {
    "library": "django",
    "name": "ordered_forms",
    "source_code": "@property\ndef ordered_forms(self):\n    if not self.is_valid() or not self.can_order:\n        raise AttributeError(\"'%s' object has no attribute 'ordered_forms'\" % self.__class__.__name__)\n    if not hasattr(self, '_ordering'):\n        self._ordering = []\n        for i, form in enumerate(self.forms):\n            if i >= self.initial_form_count() and (not form.has_changed()):\n                continue\n            if self.can_delete and self._should_delete_form(form):\n                continue\n            self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))\n\n        def compare_ordering_key(k):\n            if k[1] is None:\n                return (1, 0)\n            return (0, k[1])\n        self._ordering.sort(key=compare_ordering_key)\n    return [self.forms[i[0]] for i in self._ordering]",
    "docstring": "Return a list of form in the order specified by the incoming data. Raise an AttributeError if ordering is not allowed.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:ordered_forms arg:self arguments arg If BoolOp Call Raise Call If Call Assign For Call If BoolOp Compare Call Call If BoolOp Call Call FunctionDef name:compare_ordering_key arg:k arguments arg If Compare Return return:yes Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "constant_to_device",
    "source_code": "def constant_to_device(self, device: torch.device) -> IRNode:\n    loader = self.make_loader()\n    loader = patch.object(ConstantBuffer, 'override_device', device)(loader)\n    return Pointwise(device=device, dtype=self.dtype, inner_fn=loader, ranges=self.ranges)",
    "docstring": "Move this to a given device. Requires that all reads are to constants.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "FunctionDef name:constant_to_device arg:self arg:device arguments arg arg Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "stack",
    "source_code": "def stack(context=1):\n    return _inspect.stack(context)[1:]",
    "docstring": "TFDecorator-aware replacement for inspect.stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:stack arg:context arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_last_step_outputs",
    "source_code": "def _set_last_step_outputs(self, outputs):\n    if not isinstance(outputs, dict):\n        raise ValueError('Need a dictionary to set last_step_outputs.')\n    self._last_step_outputs = outputs",
    "docstring": "Replace the entire dictionary of last step outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:_set_last_step_outputs arg:self arg:outputs arguments arg arg If Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "adaptiveavgpool2d_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.AdaptiveAvgPool2d)\ndef adaptiveavgpool2d_inference_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n    if isinstance(n.args[0].type, TensorType):\n        output_type = adaptiveavgpool2d_check(n.args[0].type, module_instance)\n        n.type = get_greatest_upper_bound(n.type, output_type)\n    return n.type",
    "docstring": "The input and output sizes should be the same except for the last two dimensions taken from the input, which represent width and height",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:adaptiveavgpool2d_inference_rule arg:n arg:module_instance arguments arg arg Call If BoolOp Compare Call Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_copy_handle_data",
    "source_code": "def _copy_handle_data(external_tensors, *branch_graph_outputs):\n    for tensors in zip(external_tensors, *branch_graph_outputs):\n        external = tensors[0]\n        internal = tensors[1:]\n        internal_handle_data = []\n        for tensor in internal:\n            handle_data = handle_data_util.get_resource_handle_data(tensor)\n            if not handle_data.is_set or len(handle_data.shape_and_type) != 1:\n                break\n            internal_handle_data.append(handle_data)\n        else:\n            combined_shape = tensor_shape.TensorShape(None)\n            combined_dtype = None\n            for handle_data in internal_handle_data:\n                handle_shape = tensor_shape.TensorShape(handle_data.shape_and_type[0].shape)\n                combined_shape = combined_shape.most_specific_compatible_shape(handle_shape)\n                if combined_dtype is None:\n                    combined_dtype = handle_data.shape_and_type[0].dtype\n                elif handle_data.shape_and_type[0].dtype != combined_dtype:\n                    combined_dtype = types_pb2.DT_INVALID\n            combined_handle_data = internal_handle_data[0]\n            combined_handle_data.shape_and_type[0].shape.CopyFrom(combined_shape.as_proto())\n            combined_handle_data.shape_and_type[0].dtype = combined_dtype\n            handle_data_util.set_handle_data(external, combined_handle_data)",
    "docstring": "Combines shapes in handle data and sets metadata on .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_copy_handle_data arg:external_tensors arguments arg arg For Call Assign Assign Assign For Assign Call If BoolOp Compare Call Call Assign Call Assign For Assign Call Assign Call If Compare Assign If Compare Assign Assign Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "kind",
    "source_code": "@property\ndef kind(self) -> str:\n    return self._dtype.kind",
    "docstring": "A character code (one of 'biufcmMOSUV') identifying the general kind of data.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:kind arg:self arguments arg Return return:yes"
  },
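The single-character kind codes referenced above, demonstrated on plain NumPy dtypes (pandas extension dtypes follow the same 'biufcmMOSUV' convention).

```python
import numpy as np

for dt in ("bool", "int64", "uint8", "float32",
           "datetime64[ns]", "timedelta64[ns]", "object"):
    print(dt, "->", np.dtype(dt).kind)
# bool -> b, int64 -> i, uint8 -> u, float32 -> f,
# datetime64[ns] -> M, timedelta64[ns] -> m, object -> O
```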
  {
    "library": "matplotlib",
    "name": "get_alpha",
    "source_code": "def get_alpha(self):\n    return self._alpha",
    "docstring": "Return the alpha value used for blending - not supported on all backends.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_alpha arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "delete",
    "source_code": "def delete(self, function_type: function_type_lib.FunctionType, context: Optional[FunctionContext]=None) -> bool:\n    context = context or FunctionContext()\n    if (context, function_type) not in self._primary:\n        return False\n    del self._primary[context, function_type]\n    self._dispatch_dict[context].delete(function_type)\n    return True",
    "docstring": "Deletes a function given the context and type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_cache.py",
    "ast_data": "FunctionDef name:delete arg:self arg:function_type arg:context arguments arg arg arg Assign BoolOp Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_fullpath",
    "source_code": "def _fullpath(self, path):\n    splitpath = path.split(self._baseurl, 2)\n    if len(splitpath) == 1:\n        result = os.path.join(self._baseurl, path)\n    else:\n        result = path\n    return result",
    "docstring": "Return complete path for path. Prepends baseurl if necessary.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_fullpath arg:self arg:path arguments arg arg Assign Call If Compare Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "as_shape",
    "source_code": "def as_shape(shape):\n    if isinstance(shape, tensor_shape.TensorShape):\n        return shape\n    else:\n        return tensor_shape.TensorShape(shape)",
    "docstring": "Converts the given object to a TensorShape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py",
    "ast_data": "FunctionDef name:as_shape arg:shape arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_query_iterator",
    "source_code": "@staticmethod\ndef _query_iterator(cursor, chunksize: int, columns, index_col=None, coerce_float: bool=True, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> Generator[DataFrame]:\n    has_read_data = False\n    while True:\n        data = cursor.fetchmany(chunksize)\n        if type(data) == tuple:\n            data = list(data)\n        if not data:\n            cursor.close()\n            if not has_read_data:\n                result = DataFrame.from_records([], columns=columns, coerce_float=coerce_float)\n                if dtype:\n                    result = result.astype(dtype)\n                yield result\n            break\n        has_read_data = True\n        yield _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend)",
    "docstring": "Return generator through chunked result set",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_query_iterator arg:cursor arg:chunksize arg:columns arg:index_col arg:coerce_float arg:parse_dates arg:dtype arg:dtype_backend arguments arg arg arg arg arg arg arg arg Assign While Assign Call If Compare Call Assign Call If Call If Assign Call If Assign Call Assign Call"
  },
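A hedged usage sketch of the public chunked-read path that drives this iterator, using an in-memory SQLite database so the example is self-contained.

```python
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE t (x INTEGER)")
con.executemany("INSERT INTO t VALUES (?)", [(i,) for i in range(5)])

# Passing chunksize makes read_sql_query yield DataFrames lazily.
for chunk in pd.read_sql_query("SELECT x FROM t", con, chunksize=2):
    print(len(chunk))  # 2, 2, 1
con.close()
```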
  {
    "library": "authlib",
    "name": "validate_authorization_request",
    "source_code": "def validate_authorization_request(self, request):\n    if not request.token:\n        raise MissingRequiredParameterError('oauth_token')\n    credential = self.get_temporary_credential(request)\n    if not credential:\n        raise InvalidTokenError()\n    request.credential = credential\n    return request",
    "docstring": "Validate the request for resource owner authorization.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py",
    "ast_data": "FunctionDef name:validate_authorization_request arg:self arg:request arguments arg arg If Raise Call Assign Call If Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_show_tag_sets",
    "source_code": "def _show_tag_sets(saved_model_dir):\n    tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)\n    print('The given SavedModel contains the following tag-sets:')\n    for tag_set in sorted(tag_sets):\n        print('%r' % ', '.join(sorted(tag_set)))",
    "docstring": "Prints the tag-sets stored in SavedModel directory. Prints all the tag-sets for MetaGraphs stored in SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:_show_tag_sets arg:saved_model_dir arguments arg Assign Call Call For Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "_embed_ptpython_shell",
    "source_code": "def _embed_ptpython_shell(namespace: dict[str, Any]={}, banner: str='') -> EmbedFuncT:\n    import ptpython.repl\n\n    @wraps(_embed_ptpython_shell)\n    def wrapper(namespace: dict[str, Any]=namespace, banner: str='') -> None:\n        print(banner)\n        ptpython.repl.embed(locals=namespace)\n    return wrapper",
    "docstring": "Start a ptpython shell",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\console.py",
    "ast_data": "FunctionDef name:_embed_ptpython_shell arg:namespace arg:banner arguments arg arg FunctionDef name:wrapper arg:namespace arg:banner arguments arg arg Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "PyLibMCCache",
    "source_code": "class PyLibMCCache(BaseMemcachedCache):\n\n    def __init__(self, server, params):\n        import pylibmc\n        super().__init__(server, params, library=pylibmc, value_not_found_exception=pylibmc.NotFound)\n\n    @property\n    def client_servers(self):\n        output = []\n        for server in self._servers:\n            output.append(server.removeprefix('unix:'))\n        return output\n\n    def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):\n        key = self.make_and_validate_key(key, version=version)\n        if timeout == 0:\n            return self._cache.delete(key)\n        return self._cache.touch(key, self.get_backend_timeout(timeout))\n\n    def close(self, **kwargs):\n        pass",
    "docstring": "An implementation of a cache binding using pylibmc",
    "type": "class",
    "file_path": "django\\django\\core\\cache\\backends\\memcached.py",
    "ast_data": "ClassDef name:PyLibMCCache FunctionDef name:__init__ arg:self arg:server arg:params arguments arg arg arg Call Call FunctionDef name:client_servers arg:self arguments arg Assign For Call Call Return return:yes FunctionDef name:touch arg:self arg:key arg:timeout arg:version arguments arg arg arg arg Assign Call If Compare Return return:yes Call Return return:yes Call Call FunctionDef name:close arg:self arguments arg arg"
  },
  {
    "library": "django",
    "name": "clone",
    "source_code": "def clone(self, name=None):\n    if name:\n        clone_name = name\n    elif self.driver.name != 'MEM':\n        clone_name = self.name + '_copy.' + self.driver.name\n    else:\n        clone_name = os.path.join(VSI_MEM_FILESYSTEM_BASE_PATH, str(uuid.uuid4()))\n    return GDALRaster(capi.copy_ds(self.driver._ptr, force_bytes(clone_name), self._ptr, c_int(), c_char_p(), c_void_p(), c_void_p()), write=self._write)",
    "docstring": "Return a clone of this GDALRaster.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\source.py",
    "ast_data": "FunctionDef name:clone arg:self arg:name arguments arg arg If Assign If Compare Assign Assign Call Call Call Return return:yes Call Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "max",
    "source_code": "def max(self, axis=None, skipna: bool=True, *args, **kwargs) -> int | float:\n    nv.validate_minmax_axis(axis)\n    nv.validate_max(args, kwargs)\n    return self._minmax('max')",
    "docstring": "The maximum value of the RangeIndex",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\range.py",
    "ast_data": "FunctionDef name:max arg:self arg:axis arg:skipna arguments arg arg arg arg arg Call Call Return return:yes Call"
  },
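A minimal sketch of the behavior above: `RangeIndex` derives min/max from its start/stop/step arithmetic rather than scanning materialized values.

```python
import pandas as pd

idx = pd.RangeIndex(start=0, stop=10, step=3)  # 0, 3, 6, 9
print(idx.max())  # 9
print(idx.min())  # 0
```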
  {
    "library": "numpy",
    "name": "rpartition",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_partition_dispatcher)\ndef rpartition(a, sep):\n    a = np.asanyarray(a)\n    sep = np.asanyarray(sep)\n    if np.result_type(a, sep).char == 'T':\n        return _rpartition(a, sep)\n    sep = sep.astype(a.dtype, copy=False)\n    pos = _rfind_ufunc(a, sep, 0, MAX)\n    a_len = str_len(a)\n    sep_len = str_len(sep)\n    not_found = pos < 0\n    buffersizes1 = np.where(not_found, 0, pos)\n    buffersizes3 = np.where(not_found, a_len, a_len - pos - sep_len)\n    out_dtype = ','.join([f'{a.dtype.char}{n}' for n in (buffersizes1.max(), 1 if np.all(not_found) else sep_len.max(), buffersizes3.max())])\n    shape = np.broadcast_shapes(a.shape, sep.shape)\n    out = np.empty_like(a, shape=shape, dtype=out_dtype)\n    return _rpartition_index(a, sep, pos, out=(out['f0'], out['f1'], out['f2']))",
    "docstring": "Partition (split) each element around the right-most separator. For each element in `` dtype with the part after the separator See Also -------- str.rpartition Examples -------- >>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rpartition(a, 'A') (array(['aAaAa', ' a', 'abB'], dtype='<U5'), array(['A', 'A', 'A'], dtype='<U1'), array(['', ' ', 'Bba'], dtype='<U3'))",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:rpartition arg:a arg:sep arguments arg arg Assign Call Assign Call If Compare Call Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Compare Assign Call Assign Call Assign Call Call Call Call Call Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_find_rocm_home",
    "source_code": "def _find_rocm_home() -> Optional[str]:\n    rocm_home = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH')\n    if rocm_home is None:\n        hipcc_path = shutil.which('hipcc')\n        if hipcc_path is not None:\n            rocm_home = os.path.dirname(os.path.dirname(os.path.realpath(hipcc_path)))\n            if os.path.basename(rocm_home) == 'hip':\n                rocm_home = os.path.dirname(rocm_home)\n        else:\n            fallback_path = '/opt/rocm'\n            if os.path.exists(fallback_path):\n                rocm_home = fallback_path\n    if rocm_home and torch.version.hip is None:\n        logger.warning(\"No ROCm runtime is found, using ROCM_HOME='%s'\", rocm_home)\n    return rocm_home",
    "docstring": "Find the ROCm install path.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:_find_rocm_home arguments Assign BoolOp Call Call If Compare Assign Call If Compare Assign Call Call Call If Compare Call Assign Call Assign If Call Assign If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_call_wrapped_cell",
    "source_code": "def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):\n    raise NotImplementedError",
    "docstring": "Calls the wrapped cell and performs the wrapping logic. This method is called from the wrapper's or methods. Args: inputs: A tensor with wrapped cell's input. state: A tensor or tuple of tensors with wrapped cell's state. cell_call_fn: Wrapped cell's method to use for step computation (cell's or 'call' method). **kwargs: Additional arguments. Returns: A pair containing: - Output: A tensor with cell's output. - New state: A tensor or tuple of tensors with new wrapped cell's state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "FunctionDef name:_call_wrapped_cell arg:self arg:inputs arg:state arg:cell_call_fn arguments arg arg arg arg arg Raise"
  },
  {
    "library": "authlib",
    "name": "decrypt",
    "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key):\n    self.check_iv(iv)\n    cipher = Cipher(AES(key), GCM(iv, tag), backend=default_backend())\n    d = cipher.decryptor()\n    d.authenticate_additional_data(aad)\n    return d.update(ciphertext) + d.finalize()",
    "docstring": "Key Decryption with AES GCM. :param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\jwe_encs.py",
    "ast_data": "FunctionDef name:decrypt arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key arguments arg arg arg arg arg arg Call Assign Call Call Call Call Assign Call Call Return return:yes Call Call"
  },
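A hedged round-trip sketch of the same cryptography-library AES-GCM calls used above, outside of Authlib: encrypt first, then feed the ciphertext, AAD, IV, and tag back through a decryptor (recent versions of `cryptography` no longer require an explicit backend argument).

```python
import os
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import GCM

key, iv, aad = os.urandom(32), os.urandom(12), b"header"

enc = Cipher(AES(key), GCM(iv)).encryptor()
enc.authenticate_additional_data(aad)
ciphertext = enc.update(b"secret") + enc.finalize()

# Decryption mirrors the entry above: GCM takes the tag for verification.
dec = Cipher(AES(key), GCM(iv, enc.tag)).decryptor()
dec.authenticate_additional_data(aad)
assert dec.update(ciphertext) + dec.finalize() == b"secret"
```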
  {
    "library": "tensorflow",
    "name": "_decorate",
    "source_code": "def _decorate(self, decorator):\n    if self._variable_creation_config is not None or self._no_variable_creation_config is not None:\n        raise ValueError('Functions cannot be decorated after they have been traced.')\n    self._python_function = decorator(self._python_function)\n    self._function_type, self._default_values = function_type_utils.make_function_type(self._python_function, self.input_signature)",
    "docstring": "Allows the captured Python function to be decorated in place. This method is only safe to call when the Function has not been called by a user. It makes sense to use this method to push a decorator into the function rather than wrapping the function in the decorator. We use this in tf.Module to allow user annotated to remain as objects but still automatically enter the Module name_scope when they are evaluated like all other methods. Args: decorator: A callable accepting a single argument which is the function to decorate and returning a callable result. Raises: ValueError: If the function has been called a ValueError is raised.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_decorate arg:self arg:decorator arguments arg arg If BoolOp Compare Compare Raise Call Assign Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_disallow_mismatched_datetimelike",
    "source_code": "def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None:\n    vdtype = getattr(value, 'dtype', None)\n    if vdtype is None:\n        return\n    elif vdtype.kind == 'm' and dtype.kind == 'M' or (vdtype.kind == 'M' and dtype.kind == 'm'):\n        raise TypeError(f'Cannot cast {value!r} to {dtype}')",
    "docstring": "numpy allows np.array(dt64values, dtype=\"timedelta64[ns]\") and vice-versa, but we do not want to allow this, so we need to check explicitly",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:_disallow_mismatched_datetimelike arg:value arg:dtype arguments arg arg Assign Call If Compare Return return:no If BoolOp BoolOp Compare Compare BoolOp Compare Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_lru_cache",
    "source_code": "def _lru_cache(fn: Callable[..., _T], maxsize: Optional[int]=None) -> functools._lru_cache_wrapper[_T]:\n    fn_cache = lru_cache(maxsize)(fn)\n    prior_version = 0\n    if config.validate_shape_env_version_key:\n        prior_key = None\n\n        @functools.wraps(fn)\n        def wrapper(self: ShapeEnv, *args: Any, **kwargs: Any) -> _T:\n            nonlocal prior_version, prior_key\n            if prior_key is None:\n                prior_key = self._get_key()\n            if prior_version != self._version_counter:\n                fn_cache.cache_clear()\n                prior_version = self._version_counter\n                prior_key = self._get_key()\n            else:\n                assert prior_key == self._get_key(), 'ShapeEnv cache key changed without version being updated!'\n            return fn_cache(self, *args, **kwargs)\n    else:\n\n        @functools.wraps(fn)\n        def wrapper(self: ShapeEnv, *args: Any, **kwargs: Any) -> _T:\n            nonlocal prior_version\n            if prior_version != self._version_counter:\n                fn_cache.cache_clear()\n                prior_version = self._version_counter\n            return fn_cache(self, *args, **kwargs)\n    wrapper.cache_clear = fn_cache.cache_clear\n    wrapper.cache_info = fn_cache.cache_info\n    return wrapper",
    "docstring": "Wrapper around lru_cache that clears when new info about shapes has been updated. Use lru_cache if the output is always the same, regardless of the constraints we know now (i.e. evaluate_expr) Use _lru_cache otherwise. Also note that this depends on _update_version_counter being called on the shape environment whenever the constraints are updated, otherwise the cache will not be cleared.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_lru_cache arg:fn arg:maxsize arguments arg arg Assign Call Call Assign If Assign FunctionDef name:wrapper arg:self arguments arg arg arg If Compare Assign Call If Compare Call Assign Assign Call Compare Call Return return:yes Call Call FunctionDef name:wrapper arg:self arguments arg arg arg If Compare Call Assign Return return:yes Call Call Assign Assign Return return:yes"
  },
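A hedged, generic sketch of the same idea as the entry above: an `lru_cache` whose entries are invalidated whenever an external version counter changes. The names here (`VersionedCache`, `version`) are illustrative, not the PyTorch API.

```python
import functools

class VersionedCache:
    def __init__(self):
        self.version = 0  # bump whenever cached answers may have gone stale

    def cached(self, fn):
        inner = functools.lru_cache(maxsize=None)(fn)
        seen = {"version": -1}  # version the cache was last cleared at

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if seen["version"] != self.version:
                inner.cache_clear()          # drop stale entries
                seen["version"] = self.version
            return inner(*args, **kwargs)
        wrapper.cache_clear = inner.cache_clear
        return wrapper

env = VersionedCache()

@env.cached
def expensive(x):
    print("computing", x)
    return x * 2

expensive(3); expensive(3)  # computes once
env.version += 1
expensive(3)                # recomputes after invalidation
```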
  {
    "library": "pytorch",
    "name": "compare_ops",
    "source_code": "def compare_ops(program_a: torch.export.ExportedProgram, program_b: torch.export.ExportedProgram) -> tuple[set[str], set[str]]:\n    program_a_ops = set(_count_fx_targets(program_a))\n    program_b_ops = set(_count_fx_targets(program_b))\n    return (program_a_ops - program_b_ops, program_b_ops - program_a_ops)",
    "docstring": "Compare and get unique ops in two exported programs. Args: program_a: The first exported program. program_b: The second exported program. Returns: A tuple of two sets, where the first set contains the unique ops in the first program and the second set contains the unique ops in the second program.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py",
    "ast_data": "FunctionDef name:compare_ops arg:program_a arg:program_b arguments arg arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "VariableAccessTransformer",
    "source_code": "class VariableAccessTransformer(converter.Base):\n\n    def visit_Name(self, node):\n        if not anno.hasanno(node, anno.Static.ORIG_DEFINITIONS):\n            return node\n        if isinstance(node.ctx, gast.Load):\n            node = templates.replace_as_expression('ag__.ld(var_)', var_=node)\n        return node\n\n    def visit_Delete(self, node):\n        node = self.generic_visit(node)\n        rewrite_targets = []\n        for tgt in node.targets:\n            if isinstance(tgt, gast.Name):\n                rewrite_targets.append(tgt)\n        if not rewrite_targets:\n            return node\n        results = []\n        for tgt in rewrite_targets:\n            template = '\\n        var_ = ag__.Undefined(var_name)\\n      '\n            results.extend(templates.replace(template, var_=tgt, var_name=gast.Constant(tgt.id, kind=None)))\n        remaining_targets = [n for n in node.targets if n not in rewrite_targets]\n        if remaining_targets:\n            results.append(gast.Delete(targets=remaining_targets))\n        return results\n\n    def visit_AugAssign(self, node):\n        if isinstance(node.target, gast.Name):\n            template = '\\n        var_ = ag__.ld(var_)\\n        original\\n      '\n            node = templates.replace(template, var_=node.target, original=node)\n        else:\n            node = self.generic_visit(node)\n        return node",
    "docstring": "Rewrites basic symbol reads. This transformer rewrites variable reads with a \"read\" operator which allows tracking activity. Example: For a basic statement: a = b + c This is translated to: a = ld(b) + ld(c) Augmented assignment operations also introduce a operator: a += b The assignment target also receives an operator to properly represent the read: a = ld(a) a += ld(b)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\variables.py",
    "ast_data": "ClassDef name:VariableAccessTransformer FunctionDef name:visit_Name arg:self arg:node arguments arg arg If Call Return return:yes If Call Assign Call Return return:yes FunctionDef name:visit_Delete arg:self arg:node arguments arg arg Assign Call Assign For If Call Call If Return return:yes Assign For Assign Call Call Call Assign Compare If Call Call Return return:yes FunctionDef name:visit_AugAssign arg:self arg:node arguments arg arg If Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pmf",
    "source_code": "def pmf(self, x, alpha, n):\n    return np.exp(self.logpmf(x, alpha, n))",
    "docstring": "Probability mass function for a Dirichlet multinomial distribution. Parameters ---------- x: ndarray Category counts (non-negative integers). Must be broadcastable with shape parameter ``. If multidimensional, the last axis must correspond with the categories. %(_dirichlet_mn_doc_default_callparams)s Returns ------- out: ndarray or scalar Probability mass function.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pmf arg:self arg:x arg:alpha arg:n arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_onmove",
    "source_code": "def _onmove(self, event):\n    xdata, ydata = self._get_data_coords(event)\n    if self.direction == 'horizontal':\n        v = xdata\n        vpress = self._eventpress.xdata\n    else:\n        v = ydata\n        vpress = self._eventpress.ydata\n    if self._active_handle == 'C' and self._extents_on_press is not None:\n        vmin, vmax = self._extents_on_press\n        dv = v - vpress\n        vmin += dv\n        vmax += dv\n    elif self._active_handle and self._active_handle != 'C':\n        vmin, vmax = self._extents_on_press\n        if self._active_handle == 'min':\n            vmin = v\n        else:\n            vmax = v\n    else:\n        if self.ignore_event_outside and self._selection_completed:\n            return\n        vmin, vmax = (vpress, v)\n        if vmin > vmax:\n            vmin, vmax = (vmax, vmin)\n    self._set_extents((vmin, vmax))\n    if self.onmove_callback is not None:\n        self.onmove_callback(vmin, vmax)\n    return False",
    "docstring": "Motion notify event handler.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:_onmove arg:self arg:event arguments arg arg Assign Call If Compare Assign Assign Assign Assign If BoolOp Compare Compare Assign Assign If BoolOp Compare Assign If Compare Assign Assign If BoolOp Return return:no Assign If Compare Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_object_to_bytearray",
    "source_code": "def convert_object_to_bytearray(model_object, extra_buffer=b''):\n    builder = flatbuffers.Builder(1024)\n    model_offset = model_object.Pack(builder)\n    builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)\n    model_bytearray = bytes(builder.Output())\n    model_bytearray = model_bytearray + extra_buffer\n    return model_bytearray",
    "docstring": "Converts a tflite model from an object to a immutable bytearray.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py",
    "ast_data": "FunctionDef name:convert_object_to_bytearray arg:model_object arg:extra_buffer arguments arg arg Assign Call Assign Call Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_shared_x_axes",
    "source_code": "def get_shared_x_axes(self):\n    return cbook.GrouperView(self._shared_axes['x'])",
    "docstring": "Return an immutable view on the shared x-axes Grouper.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_shared_x_axes arg:self arguments arg Return return:yes Call"
  },
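A hedged sketch of how the immutable view above can be queried: axes created with `sharex=True` land in the same x-sharing group, and the returned `GrouperView` reports that membership.

```python
import matplotlib
matplotlib.use("Agg")  # headless backend so the example runs anywhere
import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(2, sharex=True)
assert ax1.get_shared_x_axes().joined(ax1, ax2)  # both in the x-share group
```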
  {
    "library": "tensorflow",
    "name": "_values",
    "source_code": "@property\ndef _values(self):\n    ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))\n    if ordered:\n        return ordered[1]\n    return []",
    "docstring": "Collect values for TrackableDataStructure.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Assign Call Call Call Call arguments arg If Return return:yes Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "get_memory_usage",
    "source_code": "@deprecation.deprecated(None, \"Use tf.config.experimental.get_memory_info(device)['current'] instead.\")\n@tf_export('config.experimental.get_memory_usage')\ndef get_memory_usage(device):\n    return get_memory_info(device)['current']",
    "docstring": "Get the current memory usage, in bytes, for the chosen device. This function is deprecated in favor of . Calling this function is equivalent to calling . See for specifying device strings. For example: >>> gpu_devices = tf.config.list_physical_devices('GPU') >>> if gpu_devices: ... tf.config.experimental.get_memory_usage('GPU:0') Does not work for CPU. For GPUs, TensorFlow will allocate all the memory by default, unless changed with . This function only returns the memory that TensorFlow is actually using, not the memory that TensorFlow has allocated on the GPU. Args: device: Device string to get the bytes in use for, e.g. Returns: Total memory usage in bytes. Raises: ValueError: Non-existent or CPU device specified.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:get_memory_usage arg:device arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "process_band_indices",
    "source_code": "def process_band_indices(self, only_lhs=False):\n    if only_lhs:\n        self.band_rhs = 1\n        self.band_lhs = self.lhs.band_index + 1\n        return\n    if isinstance(self.lhs, RasterBandTransform):\n        self.band_lhs = self.lhs.band_index + 1\n    else:\n        self.band_lhs = 1\n    self.band_rhs, *self.rhs_params = self.rhs_params",
    "docstring": "Extract the lhs band index from the band transform class and the rhs band index from the input tuple.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py",
    "ast_data": "FunctionDef name:process_band_indices arg:self arg:only_lhs arguments arg arg If Assign Assign Return return:no If Call Assign Assign Assign"
  },
  {
    "library": "kornia",
    "name": "identity_matrix",
    "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n    return eye_like(3, input)",
    "docstring": "Return identity matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\container\\image.py",
    "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "InMemoryUploadedFile",
    "source_code": "class InMemoryUploadedFile(UploadedFile):\n\n    def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):\n        super().__init__(file, name, content_type, size, charset, content_type_extra)\n        self.field_name = field_name\n\n    def open(self, mode=None):\n        self.file.seek(0)\n        return self\n\n    def chunks(self, chunk_size=None):\n        self.file.seek(0)\n        yield self.read()\n\n    def multiple_chunks(self, chunk_size=None):\n        return False",
    "docstring": "A file uploaded into memory (i.e. stream-to-memory).",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadedfile.py",
    "ast_data": "ClassDef name:InMemoryUploadedFile FunctionDef name:__init__ arg:self arg:file arg:field_name arg:name arg:content_type arg:size arg:charset arg:content_type_extra arguments arg arg arg arg arg arg arg arg Call Call Assign FunctionDef name:open arg:self arg:mode arguments arg arg Call Return return:yes FunctionDef name:chunks arg:self arg:chunk_size arguments arg arg Call Call FunctionDef name:multiple_chunks arg:self arg:chunk_size arguments arg arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "transform_key",
    "source_code": "@staticmethod\ndef transform_key(key):\n    if key is None:\n        return 'None'\n    return key.title()",
    "docstring": "Title-case an HTTP header name.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:transform_key arg:key arguments arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "units",
    "source_code": "@property\ndef units(self):\n    units, name = (None, None)\n    if self.projected or self.local:\n        units, name = capi.linear_units(self.ptr, byref(c_char_p()))\n    elif self.geographic:\n        units, name = capi.angular_units(self.ptr, byref(c_char_p()))\n    if name is not None:\n        name = force_str(name)\n    return (units, name)",
    "docstring": "Return a 2-tuple of the units value and the units name. Automatically determine whether to return the linear or angular units.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:units arg:self arguments arg Assign If BoolOp Assign Call Call Call If Assign Call Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "tuple_to_list",
    "source_code": "def tuple_to_list(tuple_type: type[tuple]) -> type[list]:\n    type_args = getattr(tuple_type, '__args__', None)\n    if tuple_type is typing.Tuple or tuple_type is tuple or type_args == () or (type_args is None):\n        return list\n    elif len(type_args) == 1:\n        return list[type_args[0]]\n    elif len(type_args) == 2 and type_args[1] is Ellipsis:\n        return list[type_args[0]]\n    else:\n        return list[typing.Union[tuple(type_args)]]",
    "docstring": "Convert into a list type with the same type arguments. Assumes that is typing.Tuple type.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\infer_schema.py",
    "ast_data": "FunctionDef name:tuple_to_list arg:tuple_type arguments arg Assign Call If BoolOp Compare Compare Compare Compare Return return:yes If Compare Call Return return:yes If BoolOp Compare Call Compare Return return:yes Return return:yes Call"
  },
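A behavior check of the conversion rules above (assumes the `tuple_to_list` function from the entry is in scope; builtin generics need Python 3.9+). The 2-tuple-with-Ellipsis case is the homogeneous `tuple[T, ...]` spelling.

```python
import typing

assert tuple_to_list(tuple) is list                           # bare tuple
assert tuple_to_list(tuple[int]) == list[int]                 # single arg
assert tuple_to_list(tuple[int, ...]) == list[int]            # homogeneous
assert tuple_to_list(tuple[int, str]) == list[typing.Union[int, str]]
```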
  {
    "library": "scipy",
    "name": "set_meta",
    "source_code": "def set_meta(self, **kwds):\n    self.meta.update(kwds)",
    "docstring": "Update the metadata dictionary with the keywords and data provided by keywords. Examples -------- :: data.set_meta(lab=\"Ph 7; Lab 26\", title=\"Ag110 + Ag108 Decay\")",
    "type": "method",
    "file_path": "scipy\\scipy\\odr\\_odrpack.py",
    "ast_data": "FunctionDef name:set_meta arg:self arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "from_apps",
    "source_code": "@classmethod\ndef from_apps(cls, apps):\n    app_models = {}\n    for model in apps.get_models(include_swapped=True):\n        model_state = ModelState.from_model(model)\n        app_models[model_state.app_label, model_state.name_lower] = model_state\n    return cls(app_models)",
    "docstring": "Take an Apps and return a ProjectState matching it.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:from_apps arg:cls arg:apps arguments arg arg Assign For Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "patch_dist",
    "source_code": "def patch_dist(dist):\n    old_parse_config_files = dist.Distribution.parse_config_files\n\n    def parse_config_files(self, *args, **kwargs):\n        result = old_parse_config_files(self, *args, **kwargs)\n        install = self.get_option_dict('install')\n        if 'prefix' in install:\n            install['prefix'] = (VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix))\n        for base in ('purelib', 'platlib', 'headers', 'scripts', 'data'):\n            key = f'install_{base}'\n            if key in install:\n                install.pop(key, None)\n        return result\n    dist.Distribution.parse_config_files = parse_config_files",
    "docstring": "Distutils allows user to configure some arguments via a configuration file: Some of this arguments though don't make sense in context of the virtual environment files, let's fix them up.",
    "type": "function",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\_virtualenv.py",
    "ast_data": "FunctionDef name:patch_dist arg:dist arguments arg Assign FunctionDef name:parse_config_files arg:self arguments arg arg arg Assign Call Assign Call If Compare Assign Call For Assign If Compare Call Return return:yes Assign"
  },
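A hedged, generic sketch of the wrap-and-reassign pattern used above: capture the original method in a closure, adjust its result, and rebind the attribute. The `Config`/`options` names are illustrative only.

```python
class Config:
    def options(self):
        return {"prefix": "/usr", "scripts": "/usr/bin"}

old_options = Config.options  # keep a reference to the original

def patched_options(self):
    result = old_options(self)
    result.pop("scripts", None)  # drop an option that makes no sense here
    return result

Config.options = patched_options  # rebind, as patch_dist does
assert Config().options() == {"prefix": "/usr"}
```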
  {
    "library": "matplotlib",
    "name": "triplot",
    "source_code": "def triplot(ax, *args, **kwargs):\n    import matplotlib.axes\n    tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)\n    x, y, edges = (tri.x, tri.y, tri.edges)\n    fmt = args[0] if args else ''\n    linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)\n    kw = cbook.normalize_kwargs(kwargs, mlines.Line2D)\n    for key, val in zip(('linestyle', 'marker', 'color'), (linestyle, marker, color)):\n        if val is not None:\n            kw.setdefault(key, val)\n    linestyle = kw['linestyle']\n    kw_lines = {**kw, 'marker': 'None', 'zorder': kw.get('zorder', 1)}\n    if linestyle not in [None, 'None', '', ' ']:\n        tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)\n        tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)\n        tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(), **kw_lines)\n    else:\n        tri_lines = ax.plot([], [], **kw_lines)\n    marker = kw['marker']\n    kw_markers = {**kw, 'linestyle': 'None'}\n    kw_markers.pop('label', None)\n    if marker not in [None, 'None', '', ' ']:\n        tri_markers = ax.plot(x, y, **kw_markers)\n    else:\n        tri_markers = ax.plot([], [], **kw_markers)\n    return tri_lines + tri_markers",
    "docstring": "Draw an unstructured triangular grid as lines and/or markers. Call signatures:: triplot(triangulation, ...) triplot(x, y, [triangles], *, [mask=mask], ...) The triangular grid can be specified either by passing a object as the first parameter, or by passing the points *x*, *y* and optionally the *triangles* and a *mask*. If neither of *triangulation* or *triangles* are given, the triangulation is calculated on the fly. Parameters ---------- triangulation : An already created triangular grid. x, y, triangles, mask Parameters defining the triangular grid. See . This is mutually exclusive with specifying *triangulation*. other_parameters All other args and kwargs are forwarded to . Returns ------- lines : The drawn triangles edges. markers : The drawn marker nodes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triplot.py",
    "ast_data": "FunctionDef name:triplot arg:ax arguments arg arg arg Assign Call Assign Assign Assign Call Assign Call For Call If Compare Call Assign Assign Call If Compare Assign Call Assign Call Assign Call Call Call Assign Call Assign Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_key_counter_alg",
    "source_code": "def get_key_counter_alg(seed, alg):\n    if alg is None:\n        alg = Algorithm.AUTO_SELECT.value\n    alg = convert_alg_to_int(alg)\n    key, counter = _get_key_counter(seed, alg)\n    return (key, counter, alg)",
    "docstring": "Calculates the key, counter and algorithm to pass to raw RNG ops. This function calculates the key and counter, and determines the algorithm that will be passed to the raw RNG ops like . Depending on the input , the key and counter may be scrambled or copied from . If is , the key and counter will be determined at runtime based on device type. Args: seed: An integer tensor of shape [2]. The seed to calculate the key and counter from. alg: The RNG algorithm. See for an explanation. Returns: A pair (key, counter, algorithm) suitable for V2 stateless RNG ops like .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops_util.py",
    "ast_data": "FunctionDef name:get_key_counter_alg arg:seed arg:alg arguments arg arg If Compare Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_in_functional_construction_mode",
    "source_code": "def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):\n    return any((isinstance(tensor, keras_tensor.KerasTensor) for tensor in nest.flatten([inputs, args, kwargs])))",
    "docstring": "Check the arguments to see if we are constructing a functional model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_in_functional_construction_mode arg:layer arg:inputs arg:args arg:kwargs arg:input_list arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "score_style",
    "source_code": "def score_style(self, style1, style2):\n    if style1 == style2:\n        return 0.0\n    elif style1 in ('italic', 'oblique') and style2 in ('italic', 'oblique'):\n        return 0.1\n    return 1.0",
    "docstring": "Return a match score between *style1* and *style2*. An exact match returns 0.0. A match between 'italic' and 'oblique' returns 0.1. No match returns 1.0.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py",
    "ast_data": "FunctionDef name:score_style arg:self arg:style1 arg:style2 arguments arg arg arg If Compare Return return:yes If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "md5sum",
    "source_code": "def md5sum(file: IO[bytes]) -> str:\n    warnings.warn('The scrapy.utils.misc.md5sum function is deprecated and will be removed in a future version of Scrapy.', ScrapyDeprecationWarning, stacklevel=2)\n    m = hashlib.md5()\n    while True:\n        d = file.read(8096)\n        if not d:\n            break\n        m.update(d)\n    return m.hexdigest()",
    "docstring": "Calculate the md5 checksum of a file-like object without reading its whole content in memory. >>> from io import BytesIO >>> md5sum(BytesIO(b'file content to hash')) '784406af91dd5a54fbb9c84c2236595a'",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\misc.py",
    "ast_data": "FunctionDef name:md5sum arg:file arguments arg Call Assign Call While Assign Call If Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "param_required",
    "source_code": "def param_required(p):\n    return p.name != 'self' and p.kind != p.VAR_KEYWORD and (p.kind != p.VAR_POSITIONAL) and (p.default == p.empty)",
    "docstring": "Identify hyper parameters of an estimator.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:param_required arg:p arguments arg Return return:yes BoolOp Compare Compare Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_calc_conv3d_flops",
    "source_code": "@ops.RegisterStatistics('Conv3D', 'flops')\ndef _calc_conv3d_flops(graph, node):\n    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    input_shape.assert_is_fully_defined()\n    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    filter_shape.assert_is_fully_defined()\n    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    output_shape.assert_is_fully_defined()\n    filter_time = int(filter_shape[0])\n    filter_height = int(filter_shape[1])\n    filter_width = int(filter_shape[2])\n    filter_in_depth = int(filter_shape[3])\n    output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n    return ops.OpStats('flops', output_count * filter_in_depth * filter_time * filter_height * filter_width * 2)",
    "docstring": "Calculates the compute resources needed for Conv3D.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_calc_conv3d_flops arg:graph arg:node arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "astype",
    "source_code": "@final\ndef astype(self, dtype: DtypeObj, errors: IgnoreRaise='raise', squeeze: bool=False) -> Block:\n    values = self.values\n    if squeeze and values.ndim == 2 and is_1d_only_ea_dtype(dtype):\n        if values.shape[0] != 1:\n            raise ValueError('Can not squeeze with more than one column.')\n        values = values[0, :]\n    new_values = astype_array_safe(values, dtype, errors=errors)\n    new_values = maybe_coerce_values(new_values)\n    refs = None\n    if astype_is_view(values.dtype, new_values.dtype):\n        refs = self.refs\n    newb = self.make_block(new_values, refs=refs)\n    if newb.shape != self.shape:\n        raise TypeError(f'cannot set astype for dtype ({self.dtype.name} [{self.shape}]) to different shape ({newb.dtype.name} [{newb.shape}])')\n    return newb",
    "docstring": "Coerce to the new dtype. Parameters ---------- dtype : np.dtype or ExtensionDtype errors : str, {'raise', 'ignore'}, default 'raise' - `` : suppress exceptions. On error return original object squeeze : bool, default False squeeze values to ndim=1 if only one column is given Returns ------- Block",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\blocks.py",
    "ast_data": "FunctionDef name:astype arg:self arg:dtype arg:errors arg:squeeze arguments arg arg arg arg Assign If BoolOp Compare Call If Compare Raise Call Assign Assign Call Assign Call Assign If Call Assign Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, query_proj, key_proj, value_proj):\n    super().__init__()\n    self.query_proj = query_proj\n    self.key_proj = key_proj\n    self.value_proj = value_proj",
    "docstring": "A in-proj container to process inputs. Args: query_proj: a proj layer for query. key_proj: a proj layer for key. value_proj: a proj layer for value.",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:query_proj arg:key_proj arg:value_proj arguments arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "MetaEstimatorMixin",
    "source_code": "class MetaEstimatorMixin:\n    pass",
    "docstring": "Mixin class for all meta estimators in scikit-learn. This mixin is empty, and only exists to indicate that the estimator is a meta-estimator. .. versionchanged:: 1.6 The is now removed and is unnecessary since tests are refactored and don't use this anymore. Examples -------- >>> from sklearn.base import MetaEstimatorMixin >>> from sklearn.datasets import load_iris >>> from sklearn.linear_model import LogisticRegression >>> class MyEstimator(MetaEstimatorMixin): ... def __init__(self, *, estimator=None): ... self.estimator = estimator ... def fit(self, X, y=None): ... if self.estimator is None: ... self.estimator_ = LogisticRegression() ... else: ... self.estimator_ = self.estimator ... return self >>> X, y = load_iris(return_X_y=True) >>> estimator = MyEstimator().fit(X, y) >>> estimator.estimator_ LogisticRegression()",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "ClassDef name:MetaEstimatorMixin"
  },
  {
    "library": "matplotlib",
    "name": "register",
    "source_code": "def register(self, cmap, *, name=None, force=False):\n    _api.check_isinstance(colors.Colormap, cmap=cmap)\n    name = name or cmap.name\n    if name in self:\n        if not force:\n            raise ValueError(f'A colormap named \"{name}\" is already registered.')\n        elif name in self._builtin_cmaps:\n            raise ValueError(f'Re-registering the builtin cmap {name!r} is not allowed.')\n        _api.warn_external(f'Overwriting the cmap {name!r} that was already in the registry.')\n    self._cmaps[name] = cmap.copy()\n    if self._cmaps[name].name != name:\n        self._cmaps[name].name = name",
    "docstring": "Register a new colormap. The colormap name can then be used as a string argument to any `` is used. force : bool, default: False If False, a ValueError is raised if trying to overwrite an already registered name. True supports overwriting registered colormaps other than the builtin colormaps.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\cm.py",
    "ast_data": "FunctionDef name:register arg:self arg:cmap arguments arg arg arg arg Call Assign BoolOp If Compare If Raise Call If Compare Raise Call Call Assign Call If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "get_dlrm_model",
    "source_code": "def get_dlrm_model(sparse_dlrm=False):\n    dlrm_model_config = {'m_spa': 16, 'ln_emb': np.array([1460, 583, 10131227, 2202608, 305, 24, 12517, 633, 3, 93145, 5683, 8351593, 3194, 27, 14992, 5461306, 10, 5652, 2173, 4, 7046547, 18, 15, 286181, 105, 142572], dtype=np.int32), 'ln_bot': np.array([13, 512, 256, 64, 16]), 'ln_top': np.array([367, 512, 256, 1]), 'arch_interaction_op': 'dot', 'arch_interaction_itself': False, 'sigmoid_bot': -1, 'sigmoid_top': 2, 'sync_dense_params': True, 'loss_threshold': 0.0, 'ndevices': 1, 'qr_flag': False, 'qr_operation': 'mult', 'qr_collisions': 4, 'qr_threshold': 200, 'md_flag': False, 'md_threshold': 200, 'weighted_pooling': None, 'loss_function': 'bce'}\n    if sparse_dlrm:\n        dlrm_model = SparseDLRM(**dlrm_model_config)\n    else:\n        dlrm_model = DLRM_Net(**dlrm_model_config)\n    return dlrm_model",
    "docstring": "Obtain dlrm model. The configs specified are based on the script in bench/dlrm_s_criteo_kaggle.sh. The same config is used to train the model for benchmarking on data sparsifier.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py",
    "ast_data": "FunctionDef name:get_dlrm_model arg:sparse_dlrm arguments arg Assign Call Call Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "PSNRLoss",
    "source_code": "class PSNRLoss(nn.Module):\n\n    def __init__(self, max_val: float) -> None:\n        super().__init__()\n        self.max_val: float = max_val\n\n    def forward(self, image: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n        return psnr_loss(image, target, self.max_val)",
    "docstring": "Create a criterion that calculates the PSNR loss. The loss is computed as follows: .. math:: \\text{loss} = -\\text{psnr(x, y)} See :meth: for details abut PSNR. Args: max_val: The maximum value in the image tensor. Shape: - Image: arbitrary dimensional tensor :math:. - Target: arbitrary dimensional tensor :math: same shape as image. - Output: a scalar. Examples: >>> ones = torch.ones(1) >>> criterion = PSNRLoss(2.) >>> criterion(ones, 1.2 * ones) # 10 * log(4/((1.2-1)**2)) / log(10) tensor(-20.0000)",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\psnr.py",
    "ast_data": "ClassDef name:PSNRLoss FunctionDef name:__init__ arg:self arg:max_val arguments arg arg Call Call FunctionDef name:forward arg:self arg:image arg:target arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "match_affine_block",
    "source_code": "def match_affine_block(index: sympy.Expr, range_tree: IterationRangesRoot) -> Optional[BlockParameters]:\n    stride = BlockPatternMatcher.match_affine_block_expr(index, range_tree.symbol())\n    if stride is None:\n        return None\n    return BlockParameters(shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[stride], offsets=[TritonSymbols.get_block_offset(range_tree)])",
    "docstring": "Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,).",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:match_affine_block arg:index arg:range_tree arguments arg arg Assign Call Call If Compare Return return:no Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_input_pipelines",
    "source_code": "@property\ndef num_input_pipelines(self):\n    return self._num_input_pipelines",
    "docstring": "Returns the number of input pipelines.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:num_input_pipelines arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "output_classes",
    "source_code": "@property\n@deprecation.deprecated(None, 'Use `tf.compat.v1.data.get_output_classes(dataset)`.')\ndef output_classes(self):\n    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self.element_spec)",
    "docstring": "Returns the class of each component of an element of this dataset. Returns: A (nested) structure of Python objects corresponding to each component of an element of this dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:output_classes arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CamelCaseToSnakeCase",
    "source_code": "def CamelCaseToSnakeCase(camel_case_input):\n    s1 = re.sub('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', camel_case_input)\n    return re.sub('([a-z0-9])([A-Z])', '\\\\1_\\\\2', s1).lower()",
    "docstring": "Converts an identifier in CamelCase to snake_case.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tools\\visualize.py",
    "ast_data": "FunctionDef name:CamelCaseToSnakeCase arg:camel_case_input arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_json_to_tensor_description",
    "source_code": "@classmethod\ndef _json_to_tensor_description(cls, tensor_json):\n    if tensor_json is None:\n        return None\n    from cutlass_library import DataType\n    from cutlass_library.library import ComplexTransform, LayoutType, TensorDescription\n    element = cls._json_to_enum(tensor_json['element'], DataType)\n    layout = cls._json_to_enum(tensor_json['layout'], LayoutType)\n    alignment = tensor_json['alignment']\n    complex_transform = cls._json_to_enum(tensor_json['complex_transform'], ComplexTransform)\n    return TensorDescription(element, layout, alignment, complex_transform)",
    "docstring": "Convert JSON dict to TensorDescription object. Args: tensor_json: Dictionary representation Returns: TensorDescription: Reconstructed object",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py",
    "ast_data": "FunctionDef name:_json_to_tensor_description arg:cls arg:tensor_json arguments arg arg If Compare Return return:no Assign Call Assign Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device_key",
    "source_code": "def device_key(dev):\n    return '' if dev is None else dev",
    "docstring": "A sort key that allows None to be compared to strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:device_key arg:dev arguments arg Return return:yes Compare"
  },
  {
    "library": "numpy",
    "name": "flat",
    "source_code": "@property\ndef flat(self):\n    return MaskedIterator(self)",
    "docstring": "Return a flat iterator, or set a flattened version of self to value.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:flat arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "factorialk",
    "source_code": "def factorialk(n, k, exact=False, extend='zero'):\n    return _factorialx_wrapper('factorialk', n, k=k, exact=exact, extend=extend)",
    "docstring": "Multifactorial of n of order k, n(!!...!). This is the multifactorial of n skipping k values. For example, factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1 In particular, for any integer `` (see also [1]).:: z!(k) = k ** ((z - 1)/k) * gamma(z/k + 1) / gamma(1/k + 1) References ---------- .. [1] Complex extension to multifactorial",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:factorialk arg:n arg:k arg:exact arg:extend arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "audio",
    "source_code": "def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):\n\n    def function(tag, scope):\n        return gen_summary_ops.write_audio_summary(_summary_state.writer._resource, _choose_step(step), tag, array_ops.identity(tensor), sample_rate=sample_rate, max_outputs=max_outputs, name=scope)\n    return summary_writer_function(name, tensor, function, family=family)",
    "docstring": "Writes an audio summary if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:audio arg:name arg:tensor arg:sample_rate arg:max_outputs arg:family arg:step arguments arg arg arg arg arg arg FunctionDef name:function arg:tag arg:scope arguments arg arg Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "sos2zpk",
    "source_code": "def sos2zpk(sos):\n    xp = array_namespace(sos)\n    sos = xp.asarray(sos)\n    n_sections = sos.shape[0]\n    z = xp.zeros(n_sections * 2, dtype=xp.complex128)\n    p = xp.zeros(n_sections * 2, dtype=xp.complex128)\n    k = 1.0\n    for section in range(n_sections):\n        zpk = tf2zpk(sos[section, :3], sos[section, 3:])\n        z = xpx.at(z, slice(2 * section, 2 * section + zpk[0].shape[0])).set(zpk[0])\n        p = xpx.at(p, slice(2 * section, 2 * section + zpk[1].shape[0])).set(zpk[1])\n        k *= zpk[2]\n    return (z, p, k)",
    "docstring": "Return zeros, poles, and gain of a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape `sosfilt` even if some of these are (effectively) zero. .. versionadded:: 0.16.0",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:sos2zpk arg:sos arguments arg Assign Call Assign Call Assign Assign Call Assign Call Assign For Call Assign Call Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_avx512_supported",
    "source_code": "def _is_avx512_supported() -> bool:\n    return torch._C._cpu._is_avx512_supported()",
    "docstring": "Returns a bool indicating if CPU supports AVX512.",
    "type": "function",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "FunctionDef name:_is_avx512_supported arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "scatter_add",
    "source_code": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')\n    return self._lazy_read(gen_resource_variable_ops.resource_scatter_add(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name=name))",
    "docstring": "Adds to this variable. Args: sparse_delta: to be added to this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:scatter_add arg:self arg:sparse_delta arg:use_locking arg:name arguments arg arg arg arg If Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "numpy",
    "name": "enable",
    "source_code": "def enable(self, shrink=1):\n    self._enabled = shrink",
    "docstring": "Set the enabling shrink to .",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:enable arg:self arg:shrink arguments arg arg Assign"
  },
  {
    "library": "numpy",
    "name": "_remove_nan_1d",
    "source_code": "def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False):\n    if arr1d.dtype == object:\n        c = np.not_equal(arr1d, arr1d, dtype=bool)\n    else:\n        c = np.isnan(arr1d)\n    s = np.nonzero(c)[0]\n    if s.size == arr1d.size:\n        warnings.warn('All-NaN slice encountered', RuntimeWarning, stacklevel=6)\n        if second_arr1d is None:\n            return (arr1d[:0], None, True)\n        else:\n            return (arr1d[:0], second_arr1d[:0], True)\n    elif s.size == 0:\n        return (arr1d, second_arr1d, overwrite_input)\n    else:\n        if not overwrite_input:\n            arr1d = arr1d.copy()\n        enonan = arr1d[-s.size:][~c[-s.size:]]\n        arr1d[s[:enonan.size]] = enonan\n        if second_arr1d is None:\n            return (arr1d[:-s.size], None, True)\n        else:\n            if not overwrite_input:\n                second_arr1d = second_arr1d.copy()\n            enonan = second_arr1d[-s.size:][~c[-s.size:]]\n            second_arr1d[s[:enonan.size]] = enonan\n            return (arr1d[:-s.size], second_arr1d[:-s.size], True)",
    "docstring": "Equivalent to arr1d[~arr1d.isnan()], but in a different order Presumably faster as it incurs fewer copies Parameters ---------- arr1d : ndarray Array to remove nans from second_arr1d : ndarray or None A second array which will have the same positions removed as arr1d. overwrite_input : bool True if can be modified in place Returns ------- res : ndarray Array with nan elements removed second_res : ndarray or None Second array with nan element positions of first array removed. overwrite_input : bool True if can be modified in place, given the constraint on the input",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_remove_nan_1d arg:arr1d arg:second_arr1d arg:overwrite_input arguments arg arg arg If Compare Assign Call Assign Call Assign Call If Compare Call If Compare Return return:yes Return return:yes If Compare Return return:yes If Assign Call Assign Assign If Compare Return return:yes If Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_header_version",
    "source_code": "def _get_header_version(path, name):\n    for line in io.open(path, 'r', encoding='utf-8').readlines():\n        match = re.match('\\\\s*#\\\\s*define %s\\\\s+(\\\\d+)' % name, line)\n        if match:\n            return match.group(1)\n    return ''",
    "docstring": "Returns preprocessor defines in C header file.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\third_party\\gpus\\find_cuda_config.py",
    "ast_data": "FunctionDef name:_get_header_version arg:path arg:name arguments arg arg For Call Call Assign Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_plugin_asset",
    "source_code": "def get_plugin_asset(plugin_asset_cls, graph=None):\n    if graph is None:\n        graph = ops.get_default_graph()\n    if not plugin_asset_cls.plugin_name:\n        raise ValueError('Class %s has no plugin_name' % plugin_asset_cls.__name__)\n    name = _PLUGIN_ASSET_PREFIX + plugin_asset_cls.plugin_name\n    container = graph.get_collection(name)\n    if container:\n        if len(container) != 1:\n            raise ValueError('Collection for %s had %d items, expected 1' % (name, len(container)))\n        instance = container[0]\n        if not isinstance(instance, plugin_asset_cls):\n            raise ValueError('Plugin name collision between classes %s and %s' % (plugin_asset_cls.__name__, instance.__class__.__name__))\n    else:\n        instance = plugin_asset_cls()\n        graph.add_to_collection(name, instance)\n        graph.add_to_collection(_PLUGIN_ASSET_PREFIX, plugin_asset_cls.plugin_name)\n    return instance",
    "docstring": "Acquire singleton PluginAsset instance from a graph. PluginAssets are always singletons, and are stored in tf Graph collections. This way, they can be defined anywhere the graph is being constructed, and if the same plugin is configured at many different points, the user can always modify the same instance. Args: plugin_asset_cls: The PluginAsset class graph: (optional) The graph to retrieve the instance from. If not specified, the default graph is used. Returns: An instance of the plugin_asset_class Raises: ValueError: If we have a plugin name collision, or if we unexpectedly find the wrong number of items in a collection.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\plugin_asset.py",
    "ast_data": "FunctionDef name:get_plugin_asset arg:plugin_asset_cls arg:graph arguments arg arg If Compare Assign Call If Raise Call Assign Assign Call If If Compare Call Raise Call Call Assign If Call Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_retain",
    "source_code": "@tf_export('sparse.retain', v1=['sparse.retain', 'sparse_retain'])\n@deprecation.deprecated_endpoints('sparse_retain')\ndef sparse_retain(sp_input, to_retain):\n    sp_input = _convert_to_sparse_tensor(sp_input)\n    to_retain = ops.convert_to_tensor(to_retain)\n    retain_shape = to_retain.get_shape()\n    retain_shape.assert_has_rank(1)\n    if sp_input.values.get_shape().dims is not None:\n        sp_input.values.get_shape().dims[0].assert_is_compatible_with(tensor_shape.dimension_at_index(retain_shape, 0))\n    where_true = array_ops.reshape(array_ops.where_v2(to_retain), [-1])\n    new_indices = array_ops.gather(sp_input.indices, where_true)\n    new_values = array_ops.gather(sp_input.values, where_true)\n    return sparse_tensor.SparseTensor(new_indices, new_values, array_ops.identity(sp_input.dense_shape))",
    "docstring": "Retains specified non-empty values within a . For example, if has shape and 4 non-empty string values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d and , then the output will be a of shape with 2 non-empty values: [0, 1]: a [3, 1]: d Args: sp_input: The input with non-empty elements. to_retain: A bool vector of length with true values. Returns: A with the same shape as the input and non-empty elements corresponding to the true positions in . Raises: TypeError: If is not a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_retain arg:sp_input arg:to_retain arguments arg arg Assign Call Assign Call Assign Call Call If Compare Call Call Call Call Assign Call Call Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "numpy",
    "name": "as_number",
    "source_code": "def as_number(obj, kind=4):\n    if isinstance(obj, int):\n        return Expr(Op.INTEGER, (obj, kind))\n    if isinstance(obj, float):\n        return Expr(Op.REAL, (obj, kind))\n    if isinstance(obj, Expr):\n        if obj.op in (Op.INTEGER, Op.REAL):\n            return obj\n    raise OpError(f'cannot convert {obj} to INTEGER or REAL constant')",
    "docstring": "Return object as INTEGER or REAL constant.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_number arg:obj arg:kind arguments arg arg If Call Return return:yes Call If Call Return return:yes Call If Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "numpy",
    "name": "sort",
    "source_code": "def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, stable=None):\n    a = np.array(a, copy=True, subok=True)\n    if axis is None:\n        a = a.flatten()\n        axis = 0\n    if isinstance(a, MaskedArray):\n        a.sort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value, stable=stable)\n    else:\n        a.sort(axis=axis, kind=kind, order=order, stable=stable)\n    return a",
    "docstring": "Return a sorted copy of the masked array. Equivalent to creating a copy of the array and applying the MaskedArray `` for the full documentation See Also -------- MaskedArray.sort : equivalent method Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] >>> masked_x = ma.masked_array(x, mask) >>> masked_x masked_array(data=[11.2, -3.973, 0.801, --], mask=[False, False, False, True], fill_value=1e+20) >>> ma.sort(masked_x) masked_array(data=[-3.973, 0.801, 11.2, --], mask=[False, False, False, True], fill_value=1e+20)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:sort arg:a arg:axis arg:kind arg:order arg:endwith arg:fill_value arguments arg arg arg arg arg arg arg Assign Call If Compare Assign Call Assign If Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_test_batch_begin",
    "source_code": "def on_test_batch_begin(self, batch, logs=None):\n    if self._should_call_test_batch_hooks:\n        self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)",
    "docstring": "Calls the methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of . Typically, the values of the 's metrics are returned. Example: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_batch_begin arg:self arg:batch arg:logs arguments arg arg arg If Call"
  },
  {
    "library": "sphinx",
    "name": "Parser",
    "source_code": "class Parser(docutils.parsers.Parser):\n    config: Config\n    env: BuildEnvironment\n\n    def set_application(self, app: Sphinx) -> None:\n        self._app = app\n        self.config = app.config\n        self.env = app.env",
    "docstring": "A base class of source parsers. The additional parsers should inherit this class instead of ``, this class improves accessibility to Sphinx APIs. The subclasses can access sphinx core runtime objects (app, config and env).",
    "type": "class",
    "file_path": "sphinx\\sphinx\\parsers.py",
    "ast_data": "ClassDef name:Parser FunctionDef name:set_application arg:self arg:app arguments arg arg Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "zeros_like",
    "source_code": "@doc_controls.do_not_generate_docs\ndef zeros_like(x, dtype=None, name=None):\n    return array_ops.zeros_like(x, dtype=dtype, name=name)",
    "docstring": "Instantiates an all-zeros variable of the same shape as another tensor. Args: x: Keras variable or Keras tensor. dtype: dtype of returned Keras variable. uses the dtype of . name: name for the variable to create. Returns: A Keras variable with the shape of filled with zeros. Example:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:zeros_like arg:x arg:dtype arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_float",
    "source_code": "def to_float(self):\n    cls = type(self)\n    conv = cls._FLOAT_CONV_MODULE(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, self.dilation, self.groups, self.bias is not None, self.padding_mode)\n    conv.weight = torch.nn.Parameter(self.weight.detach())\n    if self.bias is not None:\n        conv.bias = torch.nn.Parameter(self.bias.detach())\n    if issubclass(cls, _FusedModule):\n        modules = [conv]\n        assert hasattr(cls, '_FLOAT_RELU_MODULE')\n        relu = cls._FLOAT_RELU_MODULE()\n        modules.append(relu)\n        fused = cls._FLOAT_MODULE(*modules)\n        fused.train(self.training)\n        return fused\n    else:\n        return conv",
    "docstring": "This works for both single qat conv, and the qat conv - relu modules to convert the qat module to a floating point module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\conv.py",
    "ast_data": "FunctionDef name:to_float arg:self arguments arg Assign Call Assign Call Compare Assign Call Call If Compare Assign Call Call If Call Assign Call Assign Call Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_request",
    "source_code": "def validate_request(self, request):\n    pass",
    "docstring": "A method to validate if the HTTP request is valid or not. Developers MUST re-implement this method. For instance, your server requires a \"X-Device-Version\" in the header:: def validate_request(self, request): if \"X-Device-Version\" not in request.headers: raise InvalidRequestError() Usually, you don't have to detect if the request is valid or not. If you have to, you MUST re-implement this method. :param request: instance of HttpRequest :raise: InvalidRequestError",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "FunctionDef name:validate_request arg:self arg:request arguments arg arg"
  },
  {
    "library": "cherrypy",
    "name": "close",
    "source_code": "def close(self):\n    self.rfile.close()",
    "docstring": "Close the underlying file object.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "outermost_graphs",
    "source_code": "def outermost_graphs(self):\n    return [graph for graph in self._graph_by_id.values() if not graph.outer_graph_id]",
    "docstring": "Get the number of outer most graphs read so far.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:outermost_graphs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "counter",
    "source_code": "@property\ndef counter(self):\n    return self._counter",
    "docstring": "Returns the counter as a float32 .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:counter arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "unwrap_values",
    "source_code": "def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs, grouped_updates=None, grouped_session_args=None, with_loss_tensor=False):\n    all_inputs = flatten_per_replica_values(distribution_strategy, grouped_inputs)\n    all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor)\n    if grouped_updates:\n        all_updates = flatten_per_replica_values(distribution_strategy, grouped_updates)\n    else:\n        all_updates = None\n    all_session_args = {}\n    if grouped_session_args:\n        grouped_feed_dict = grouped_session_args.get('feed_dict')\n        if grouped_feed_dict:\n            all_session_args['feed_dict'] = flatten_per_replica_values(distribution_strategy, grouped_feed_dict)\n        grouped_fetches = grouped_session_args.get('fetches')\n        if grouped_fetches:\n            all_session_args['fetches'] = flatten_per_replica_values(distribution_strategy, grouped_fetches)\n    return (all_inputs, all_outputs, all_updates, all_session_args)",
    "docstring": "Unwrap the list of values contained in the PerReplica parameters. This function calls to parse each of the input parameters into a list of values on the different devices. If we set to be True, we also call on the list of losses on the different devices to give us one loss tensor. Args: distribution_strategy: DistributionStrategy used to distribute training and validation. grouped_inputs: PerReplica inputs returned from the train or test function that we ran on each device. grouped_outputs: PerReplica outputs returned from the train or test function that we ran on each device. grouped_updates: PerReplica updates returned from the train or test function that we ran on each device. grouped_session_args: PerReplica session args returned from the train or test function that we ran on each device. with_loss_tensor: Boolean that indicates if we need to add the reduced loss tensor as one of the outputs. Returns: Values of each of the PerReplica parameters.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:unwrap_values arg:distribution_strategy arg:grouped_inputs arg:grouped_outputs arg:grouped_updates arg:grouped_session_args arg:with_loss_tensor arguments arg arg arg arg arg arg Assign Call Assign Call If Assign Call Assign Assign If Assign Call If Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_post_plot_logic_common",
    "source_code": "@final\ndef _post_plot_logic_common(self, ax: Axes) -> None:\n    if self.orientation == 'vertical' or self.orientation is None:\n        type(self)._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)\n        type(self)._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)\n        if hasattr(ax, 'right_ax'):\n            type(self)._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)\n    elif self.orientation == 'horizontal':\n        type(self)._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)\n        type(self)._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)\n        if hasattr(ax, 'right_ax'):\n            type(self)._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)\n    else:\n        raise ValueError",
    "docstring": "Common post process for each axes",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_post_plot_logic_common arg:self arg:ax arguments arg arg If BoolOp Compare Compare Call Call Call Call If Call Call Call If Compare Call Call Call Call If Call Call Call Raise"
  },
  {
    "library": "django",
    "name": "StopUpload",
    "source_code": "class StopUpload(UploadFileException):\n\n    def __init__(self, connection_reset=False):\n        self.connection_reset = connection_reset\n\n    def __str__(self):\n        if self.connection_reset:\n            return 'StopUpload: Halt current upload.'\n        else:\n            return 'StopUpload: Consume request data, then halt.'",
    "docstring": "This exception is raised when an upload must abort.",
    "type": "class",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "ClassDef name:StopUpload FunctionDef name:__init__ arg:self arg:connection_reset arguments arg arg Assign FunctionDef name:__str__ arg:self arguments arg If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_dense_tensor",
    "source_code": "def get_dense_tensor(self, transformation_cache, state_manager):\n    if isinstance(self.categorical_column, SequenceCategoricalColumn):\n        raise ValueError('In indicator_column: {}. categorical_column must not be of type SequenceCategoricalColumn. Suggested fix A: If you wish to use DenseFeatures, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use SequenceFeatures instead of DenseFeatures. Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n    return transformation_cache.get(self, state_manager)",
    "docstring": "Returns dense representing feature. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Dense created within . Raises: ValueError: If is a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_dense_tensor arg:self arg:transformation_cache arg:state_manager arguments arg arg arg If Call Raise Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "shapes",
    "source_code": "@property\ndef shapes(self):\n    return self._shapes",
    "docstring": "The list of shapes for each component of a queue element.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:shapes arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self):\n    if mpl.rcParams['_internal.classic_mode']:\n        nbins = 9\n        steps = [1, 2, 5, 10]\n    else:\n        nbins = 'auto'\n        steps = [1, 2, 2.5, 5, 10]\n    super().__init__(nbins=nbins, steps=steps)",
    "docstring": "To know the values of the non-public parameters, please have a look to the defaults of .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg If Assign Assign Assign Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "_iswritemode",
    "source_code": "def _iswritemode(self, mode):\n    _writemodes = ('w', '+')\n    return any((c in _writemodes for c in mode))",
    "docstring": "Test if the given mode will open a file for writing.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_iswritemode arg:self arg:mode arguments arg arg Assign Return return:yes Call Compare"
  },
  {
    "library": "scikit-learn",
    "name": "_check_pos_label_consistency",
    "source_code": "def _check_pos_label_consistency(pos_label, y_true):\n    if pos_label is None:\n        classes = np.unique(y_true)\n        if classes.dtype.kind in 'OUS' or not (np.array_equal(classes, [0, 1]) or np.array_equal(classes, [-1, 1]) or np.array_equal(classes, [0]) or np.array_equal(classes, [-1]) or np.array_equal(classes, [1])):\n            classes_repr = ', '.join([repr(c) for c in classes.tolist()])\n            raise ValueError(f'y_true takes value in {{{classes_repr}}} and pos_label is not specified: either make y_true take value in {{0, 1}} or {{-1, 1}} or pass pos_label explicitly.')\n        pos_label = 1\n    return pos_label",
    "docstring": "Check if need to be specified or not. In binary classification, we fix if the labels are in the set {-1, 1} or {0, 1}. Otherwise, we raise an error asking to specify the parameters. Parameters ---------- pos_label : int, float, bool, str or None The positive label. y_true : ndarray of shape (n_samples,) The target vector. Returns ------- pos_label : int, float, bool or str If can be inferred, it will be returned. Raises ------ ValueError In the case that does not have label in {-1, 1} or {0, 1}, it will raise a .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_pos_label_consistency arg:pos_label arg:y_true arguments arg arg If Compare Assign Call If BoolOp Compare BoolOp Call Call Call Call Call Assign Call Call Call Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save",
    "source_code": "def save(self, representative_dataset: RepresentativeDatasetMapping) -> Mapping[str, _RepresentativeDatasetFile]:\n    dataset_file_map = {}\n    for signature_def_key, repr_ds in representative_dataset.items():\n        if signature_def_key not in self.path_map:\n            raise ValueError(f'SignatureDef key does not exist in the provided path_map: {signature_def_key}')\n        dataset_file_map[signature_def_key] = self._save_tf_record_dataset(repr_ds, signature_def_key)\n    return dataset_file_map",
    "docstring": "Saves the representative dataset. Args: representative_dataset: Signature def key -> representative dataset mapping. Each dataset is saved in a separate TFRecord file whose path matches the signature def key of . Raises: ValueError: When the signature def key in is not present in the . Returns: A map from signature key to the RepresentativeDatasetFile instance contains the path to the saved file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:save arg:self arg:representative_dataset arguments arg arg Assign For Call If Compare Raise Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_escape_latex_math",
    "source_code": "def _escape_latex_math(s: str) -> str:\n    s = s.replace('\\\\$', 'rt8§=§7wz')\n    ps_d = re.compile('\\\\$.*?\\\\$').search(s, 0)\n    ps_p = re.compile('\\\\(.*?\\\\)').search(s, 0)\n    mode = []\n    if ps_d:\n        mode.append(ps_d.span()[0])\n    if ps_p:\n        mode.append(ps_p.span()[0])\n    if len(mode) == 0:\n        return _escape_latex(s.replace('rt8§=§7wz', '\\\\$'))\n    if s[mode[0]] == '$':\n        return _math_mode_with_dollar(s.replace('rt8§=§7wz', '\\\\$'))\n    if s[mode[0] - 1:mode[0] + 1] == '\\\\(':\n        return _math_mode_with_parentheses(s.replace('rt8§=§7wz', '\\\\$'))\n    else:\n        return _escape_latex(s.replace('rt8§=§7wz', '\\\\$'))",
    "docstring": "All characters in LaTeX math mode are preserved. The substrings in LaTeX math mode, which either are surrounded by two characters ``, are preserved without escaping. Otherwise regular LaTeX escaping applies. Parameters ---------- s : str Input to be escaped Return ------ str : Escaped string",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\style_render.py",
    "ast_data": "FunctionDef name:_escape_latex_math arg:s arguments arg Assign Call Assign Call Call Assign Call Call Assign If Call Call If Call Call If Compare Call Return return:yes Call Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_anchor",
    "source_code": "def get_anchor(self):\n    return self._anchor",
    "docstring": "Return the anchor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:get_anchor arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cast_if_floating_dtype",
    "source_code": "def cast_if_floating_dtype(x, dtype=None):\n    return nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype), x)",
    "docstring": "Casts the given data tensors to the default floating point type. Casts only if the input is already a floating point type. Args: x: tensor or list/tuple of tensors. dtype: The dtype to which Tensors should be cast. Returns: Converted input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:cast_if_floating_dtype arg:x arg:dtype arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "pack_padded_sequence",
    "source_code": "def pack_padded_sequence(input: Tensor, lengths: Union[Tensor, list[int]], batch_first: bool=False, enforce_sorted: bool=True) -> PackedSequence:\n    if not isinstance(lengths, torch.Tensor):\n        if torch._C._get_tracing_state():\n            warnings.warn('pack_padded_sequence has been called with a Python list of sequence lengths. The tracer cannot track the data flow of Python values, and it will treat them as constants, likely rendering the trace incorrect for any other combination of lengths.', stacklevel=2)\n        lengths = torch.as_tensor(lengths, dtype=torch.int64, device='cpu')\n    else:\n        lengths = lengths.to(dtype=torch.int64)\n    if enforce_sorted:\n        sorted_indices = None\n    else:\n        lengths, sorted_indices = torch.sort(lengths, descending=True)\n        sorted_indices = sorted_indices.to(input.device)\n        batch_dim = 0 if batch_first else 1\n        input = input.index_select(batch_dim, sorted_indices)\n    data, batch_sizes = _VF._pack_padded_sequence(input, lengths, batch_first)\n    return _packed_sequence_init(data, batch_sizes, sorted_indices, None)",
    "docstring": "Packs a Tensor containing padded sequences of variable length. :attr: can be of size `batch_firstbatch_firstenforce_sorted = Falseenforce_sortedenforce_sorted = Truepad_packed_sequencepad_packed_sequencePackedSequencePackedSequencePackedSequence` object",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\rnn.py",
    "ast_data": "FunctionDef name:pack_padded_sequence arg:input arg:lengths arg:batch_first arg:enforce_sorted arguments arg arg arg arg If Call If Call Call Assign Call Assign Call If Assign Assign Call Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_checkcovariance_prior_parameter",
    "source_code": "def _checkcovariance_prior_parameter(self, X):\n    _, n_features = X.shape\n    if self.covariance_prior is None:\n        self.covariance_prior_ = {'full': np.atleast_2d(np.cov(X.T)), 'tied': np.atleast_2d(np.cov(X.T)), 'diag': np.var(X, axis=0, ddof=1), 'spherical': np.var(X, axis=0, ddof=1).mean()}[self.covariance_type]\n    elif self.covariance_type in ['full', 'tied']:\n        self.covariance_prior_ = check_array(self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False)\n        _check_shape(self.covariance_prior_, (n_features, n_features), '%s covariance_prior' % self.covariance_type)\n        _check_precision_matrix(self.covariance_prior_, self.covariance_type)\n    elif self.covariance_type == 'diag':\n        self.covariance_prior_ = check_array(self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False)\n        _check_shape(self.covariance_prior_, (n_features,), '%s covariance_prior' % self.covariance_type)\n        _check_precision_positivity(self.covariance_prior_, self.covariance_type)\n    else:\n        self.covariance_prior_ = self.covariance_prior",
    "docstring": "Check the . Parameters ---------- X : array-like of shape (n_samples, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_checkcovariance_prior_parameter arg:self arg:X arguments arg arg Assign If Compare Assign Call Call Call Call Call Call Call If Compare Assign Call Call Call If Compare Assign Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "CompileCommand",
    "source_code": "@dataclasses.dataclass\nclass CompileCommand:\n    file: str\n    arguments: list[str]\n\n    @classmethod\n    def from_args_list(cls, args_list: list[str]) -> 'CompileCommand':\n        cc_file = None\n        filtered_args = []\n        for arg in args_list:\n            if arg in _DISALLOWED_ARGS:\n                continue\n            if arg.endswith('.cc'):\n                cc_file = arg\n            filtered_args.append(arg)\n        return cls(cc_file, filtered_args)\n\n    def to_dumpable_json(self, directory: str) -> _JSONDict:\n        return {'directory': directory, 'file': self.file, 'arguments': self.arguments}",
    "docstring": "Represents a compilation command with options on a specific file.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\build_tools\\lint\\generate_compile_commands.py",
    "ast_data": "ClassDef name:CompileCommand FunctionDef name:from_args_list arg:cls arg:args_list arguments arg arg Assign Assign For If Compare If Call Assign Call Return return:yes Call FunctionDef name:to_dumpable_json arg:self arg:directory arguments arg arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "filter_files",
    "source_code": "def filter_files(prefix, suffix, files, remove_prefix=None):\n    filtered, rest = ([], [])\n    match = re.compile(prefix + '.*' + suffix + '\\\\Z').match\n    if remove_prefix:\n        ind = len(prefix)\n    else:\n        ind = 0\n    for file in [x.strip() for x in files]:\n        if match(file):\n            filtered.append(file[ind:])\n        else:\n            rest.append(file)\n    return (filtered, rest)",
    "docstring": "Filter files by prefix and suffix.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\f2py2e.py",
    "ast_data": "FunctionDef name:filter_files arg:prefix arg:suffix arg:files arg:remove_prefix arguments arg arg arg arg Assign Assign Call If Assign Call Assign For Call If Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "query",
    "source_code": "def query(self) -> bool:\n    return super().query()",
    "docstring": "Check if all work currently captured by event has completed. Returns: A boolean indicating if all work currently captured by event has completed.",
    "type": "method",
    "file_path": "pytorch\\torch\\xpu\\streams.py",
    "ast_data": "FunctionDef name:query arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "__len__",
    "source_code": "def __len__(self) -> int:\n    return len(self._mgr)",
    "docstring": "Return the length of the Series.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_debug_graph_node",
    "source_code": "def _process_debug_graph_node(self, node):\n    if is_debug_node(node.name):\n        return\n    if node.name in self._node_inputs:\n        raise ValueError(\"Duplicate node name on device %s: '%s'\" % (self._device_name, node.name))\n    self._node_attributes[node.name] = node.attr\n    self._node_inputs[node.name] = []\n    self._node_ctrl_inputs[node.name] = []\n    self._node_recipients[node.name] = []\n    self._node_ctrl_recipients[node.name] = []\n    if node.name not in self._node_devices:\n        self._node_devices[node.name] = set()\n    self._node_devices[node.name].add(node.device if node.device else self._device_name)\n    self._node_op_types[node.name] = node.op\n    self._ref_args[node.name] = self._get_ref_args(node)\n    for inp in node.input:\n        if is_copy_node(inp) and (node.op == '_Send' or node.op == '_Retval'):\n            self._copy_send_nodes.append(node.name)\n        if inp.startswith('^'):\n            cinp = inp[1:]\n            self._node_ctrl_inputs[node.name].append(cinp)\n        else:\n            self._node_inputs[node.name].append(inp)",
    "docstring": "Process a node from the debug GraphDef. Args: node: (NodeDef) A partition-graph node to be processed. Raises: ValueError: If duplicate node names are encountered.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py",
    "ast_data": "FunctionDef name:_process_debug_graph_node arg:self arg:node arguments arg arg If Call Return return:no If Compare Raise Call Assign Assign Assign Assign Assign If Compare Assign Call Call Assign Assign Call For If BoolOp Call BoolOp Compare Compare Call If Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "find_mismatched_vars",
    "source_code": "def find_mismatched_vars(var, types, allow_none=False):\n    mismatched_vars = set()\n    if isinstance(var, (TupleVariable, ListVariable)):\n        for item in var.items:\n            mismatched_vars.update(find_mismatched_vars(item, types, allow_none))\n    elif isinstance(var, ConstDictVariable):\n        for value in var.items.values():\n            mismatched_vars.update(find_mismatched_vars(value, types, allow_none))\n    else:\n\n        def _is_none(var):\n            return var.is_python_constant() and var.as_python_constant() is None\n        if not isinstance(var, types) and (not (allow_none and _is_none(var))):\n            mismatched_vars.add(var)\n    return mismatched_vars",
    "docstring": "Recursively finds variables whose type is not an instance of the specified types. Args: var: The variable to check. types: A tuple of allowed types. allow_none (bool): Whether to allow None values. Defaults to False. Returns: A set of variables whose type is not an instance of the specified types.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\higher_order_ops.py",
    "ast_data": "FunctionDef name:find_mismatched_vars arg:var arg:types arg:allow_none arguments arg arg arg Assign Call If Call For Call Call If Call For Call Call Call FunctionDef name:_is_none arg:var arguments arg Return return:yes BoolOp Call Compare Call If BoolOp Call BoolOp Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, boxes: Tensor, confidence_threshold: Optional[Tensor]=None, classes_to_keep: Optional[Tensor]=None) -> Union[Tensor, List[Tensor]]:\n    zero_tensor = tensor(0.0, device=boxes.device, dtype=boxes.dtype)\n    confidence_threshold = confidence_threshold or self.confidence_threshold or zero_tensor\n    confidence_mask = boxes[:, :, 1] > confidence_threshold\n    classes_to_keep = classes_to_keep or self.classes_to_keep\n    if classes_to_keep is not None:\n        class_ids = boxes[:, :, 0:1]\n        classes_to_keep = classes_to_keep.view(1, 1, -1)\n        class_mask = (class_ids == classes_to_keep).any(dim=-1)\n    else:\n        class_mask = (confidence_mask * 0 + 1).bool()\n    combined_mask = confidence_mask & class_mask\n    if self.filter_as_zero:\n        filtered_boxes = boxes * combined_mask[:, :, None]\n        return filtered_boxes\n    filtered_boxes_list = []\n    for i in range(boxes.shape[0]):\n        box = boxes[i]\n        mask = combined_mask[i]\n        valid_boxes = box[mask]\n        filtered_boxes_list.append(valid_boxes)\n    return filtered_boxes_list",
    "docstring": "Filter boxes according to the desired threshold. To be ONNX-friendly, the inputs for direct forwarding need to be all tensors. Args: boxes: [B, D, 6], where B is the batchsize, D is the number of detections in the image, 6 represent (class_id, confidence_score, x, y, w, h). confidence_threshold: an 0-d scalar that represents the desired threshold. classes_to_keep: a 1-d tensor of classes to keep. If None, keep all classes. Returns: Union[Tensor, List[Tensor]] If is True, return a tensor of shape [D, 6], where D is the total number of detections as input. If is False, return a list of tensors of shape [D, 6], where D is the number of valid detections for each element in the batch.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\detection\\utils.py",
    "ast_data": "FunctionDef name:forward arg:self arg:boxes arg:confidence_threshold arg:classes_to_keep arguments arg arg arg arg Assign Call Assign BoolOp Assign Compare Assign BoolOp If Compare Assign Assign Call Assign Call Compare Assign Call Assign If Assign Return return:yes Assign For Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_addsub_object_array",
    "source_code": "@final\ndef _addsub_object_array(self, other: npt.NDArray[np.object_], op) -> np.ndarray:\n    assert op in [operator.add, operator.sub]\n    if len(other) == 1 and self.ndim == 1:\n        return op(self, other[0])\n    if get_option('performance_warnings'):\n        warnings.warn(f'Adding/subtracting object-dtype array to {type(self).__name__} not vectorized.', PerformanceWarning, stacklevel=find_stack_level())\n    assert self.shape == other.shape, (self.shape, other.shape)\n    res_values = op(self.astype('O'), np.asarray(other))\n    return res_values",
    "docstring": "Add or subtract array-like of DateOffset objects Parameters ---------- other : np.ndarray[object] op : {operator.add, operator.sub} Returns ------- np.ndarray[object] Except in fastpath case with length 1 where we operate on the contained scalar.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_addsub_object_array arg:self arg:other arg:op arguments arg arg arg Compare If BoolOp Compare Call Compare Return return:yes Call If Call Call Call Call Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "make_fx",
    "source_code": "def make_fx(f: Callable, decomposition_table: Optional[Mapping[OpOverload, Callable]]=None, tracing_mode: str='real', _allow_non_fake_inputs: bool=False, *, pre_dispatch: bool=False, record_module_stack: bool=False, _allow_fake_constant: bool=False, _error_on_data_dependent_ops: bool=True) -> Callable[..., GraphModule]:\n    assert tracing_mode in ['real', 'fake', 'symbolic']\n    make_fx_tracer = _MakefxTracer(decomposition_table, tracing_mode, _allow_non_fake_inputs, pre_dispatch, record_module_stack, _allow_fake_constant, _error_on_data_dependent_ops)\n\n    @functools.wraps(f)\n    def wrapped(*args: object) -> GraphModule:\n        return make_fx_tracer.trace(f, *args)\n    return wrapped",
    "docstring": "Given a function f, return a new function which when executed with valid arguments to f, returns an FX GraphModule representing the set of operations that were executed during the course of execution.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:make_fx arg:f arg:decomposition_table arg:tracing_mode arg:_allow_non_fake_inputs arguments arg arg arg arg arg arg arg arg Compare Assign Call FunctionDef name:wrapped arguments arg Return return:yes Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "LocalResponseNorm",
    "source_code": "class LocalResponseNorm(Module):\n    __constants__ = ['size', 'alpha', 'beta', 'k']\n    size: int\n    alpha: float\n    beta: float\n    k: float\n\n    def __init__(self, size: int, alpha: float=0.0001, beta: float=0.75, k: float=1.0) -> None:\n        super().__init__()\n        self.size = size\n        self.alpha = alpha\n        self.beta = beta\n        self.k = k\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.local_response_norm(input, self.size, self.alpha, self.beta, self.k)\n\n    def extra_repr(self):\n        return '{size}, alpha={alpha}, beta={beta}, k={k}'.format(**self.__dict__)",
    "docstring": "Applies local response normalization over an input signal. The input signal is composed of several input planes, where channels occupy the second dimension. Applies normalization across channels. .. math:: b_{c} = a_{c}\\left(k + \\frac{\\alpha}{n} \\sum_{c'=\\max(0, c-n/2)}^{\\min(N-1,c+n/2)}a_{c'}^2\\right)^{-\\beta} Args: size: amount of neighbouring channels used for normalization alpha: multiplicative factor. Default: 0.0001 beta: exponent. Default: 0.75 k: additive factor. Default: 1 Shape: - Input: :math: - Output: :math: (same shape as input) Examples:: >>> lrn = nn.LocalResponseNorm(2) >>> signal_2d = torch.randn(32, 5, 24, 24) >>> signal_4d = torch.randn(16, 5, 7, 7, 7, 7) >>> output_2d = lrn(signal_2d) >>> output_4d = lrn(signal_4d)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\normalization.py",
    "ast_data": "ClassDef name:LocalResponseNorm Assign FunctionDef name:__init__ arg:self arg:size arg:alpha arg:beta arg:k arguments arg arg arg arg arg Call Call Assign Assign Assign Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "get_api_versions",
    "source_code": "def get_api_versions(apiversion):\n    sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))\n    try:\n        m = __import__('genapi')\n        numpy_api = __import__('numpy_api')\n        curapi_hash = m.fullapi_hash(numpy_api.full_api)\n        apis_hash = m.get_versions_hash()\n    finally:\n        del sys.path[0]\n    return (curapi_hash, apis_hash[apiversion])",
    "docstring": "Return current C API checksum and the recorded checksum. Return current C API checksum and the recorded checksum for the given version of the C API version.",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\code_generators\\verify_c_api_version.py",
    "ast_data": "FunctionDef name:get_api_versions arg:apiversion arguments arg Call Call Call Try Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "crelu",
    "source_code": "@tf_export(v1=['nn.crelu'])\n@dispatch.add_dispatch_support\ndef crelu(features, name=None, axis=-1):\n    with ops.name_scope(name, 'CRelu', [features]) as name:\n        features = ops.convert_to_tensor(features, name='features')\n        c = array_ops.concat([features, -features], axis, name=name)\n        return gen_nn_ops.relu(c)",
    "docstring": "Computes Concatenated ReLU. Concatenates a ReLU which selects only the positive part of the activation with a ReLU which selects only the *negative* part of the activation. Note that as a result this non-linearity doubles the depth of the activations. Source: [Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units. W. Shang, et al.]( Args: features: A with type , , , , , , or . name: A name for the operation (optional). axis: The axis that the output values are concatenated along. Default is -1. Returns: A with the same type as . References: Understanding and Improving Convolutional Neural Networks via Concatenated Rectified Linear Units: [Shang et al., 2016]( ([pdf](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:crelu arg:features arg:name arg:axis arguments arg arg arg With Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "update",
    "source_code": "def update(self, *args, **kwargs) -> None:\n    if isinstance(self.params, dict):\n        self.params.update(*args, **kwargs)",
    "docstring": "Update self.params with supplied args.",
    "type": "method",
    "file_path": "pandas\\pandas\\util\\_decorators.py",
    "ast_data": "FunctionDef name:update arg:self arguments arg arg arg If Call Call"
  },
  {
    "library": "kornia",
    "name": "keypoints_labels",
    "source_code": "@property\ndef keypoints_labels(self) -> Optional[Tensor]:\n    return self.points[1] if isinstance(self.points, tuple) else None",
    "docstring": "The keypoints labels from the .",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "FunctionDef name:keypoints_labels arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "init_pool_generator",
    "source_code": "def init_pool_generator(gens, random_seed=None, id_queue=None):\n    global _SHARED_SEQUENCES\n    _SHARED_SEQUENCES = gens\n    worker_proc = multiprocessing.current_process()\n    worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)\n    if random_seed is not None:\n        np.random.seed(random_seed + worker_proc.ident)\n    if id_queue is not None:\n        id_queue.put(worker_proc.ident, block=True, timeout=0.1)",
    "docstring": "Initializer function for pool workers. Args: gens: State which should be made available to worker processes. random_seed: An optional value with which to seed child processes. id_queue: A multiprocessing Queue of worker ids. This is used to indicate that a worker process was created by Keras and can be terminated using the cleanup_all_keras_forkpools utility.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:init_pool_generator arg:gens arg:random_seed arg:id_queue arguments arg arg arg Assign Assign Call Assign Call If Compare Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_getter",
    "source_code": "def _make_getter(captured_getter, captured_previous):\n    return lambda **kwargs: captured_getter(captured_previous, **kwargs)",
    "docstring": "Gets around capturing loop variables in python being broken.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:_make_getter arg:captured_getter arg:captured_previous arguments arg arg Return return:yes arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_perform_local_step",
    "source_code": "def _perform_local_step(bucket: dist.GradBucket, zero: ZeroRedundancyOptimizer, rank: int):\n    overlap_info = zero._overlap_info\n    bucket_index = bucket.index()\n    assert len(zero.optim.param_groups) == 1, 'Overlapping DDP with ZeRO only supports a single parameter group'\n    num_local_optim_params = len(zero.optim.param_groups[0]['params'])\n    gradients: list[Optional[torch.Tensor]] = [_NO_PARAM_UPDATE for _ in range(num_local_optim_params)]\n    assert bucket_index in overlap_info.offsets, f'Bucket index {bucket_index} was not assigned to rank {rank}'\n    gradients_offset = overlap_info.offsets[bucket_index]\n    bucket_assignment = zero._bucket_assignments_per_rank[rank][bucket_index]\n    bucket_offset = bucket_assignment.offset\n    length = len(bucket_assignment.parameters)\n    bucket_gradients = bucket.gradients()[bucket_offset:bucket_offset + length]\n    for i, grad in enumerate(bucket_gradients):\n        gradients[gradients_offset + i] = grad\n    zero._local_step(gradients)",
    "docstring": "Perform a local optimizer step using the gradients provided by `ZeroRedundancyOptimizer_local_step`. rank (int): the calling process's rank. .. warning:: This function assumes that appropriate synchronization has taken place so that the bucket's gradients can be used.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py",
    "ast_data": "FunctionDef name:_perform_local_step arg:bucket arg:zero arg:rank arguments arg arg arg Assign Assign Call Compare Call Assign Call Call Compare Assign Assign Assign Assign Call Assign Call For Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "composition_to_dims",
    "source_code": "def composition_to_dims(composition: Sequence[Union[list[Union[str, AnonymousAxis]], str]]) -> list[Union[str, tuple[str, ...]]]:\n    dim_composition: list[Union[str, tuple[str, ...]]] = []\n    for dimension in composition:\n        if isinstance(dimension, list):\n            dim_composition.append(tuple((dim for identifier in dimension for dim in identifier_dim_map[identifier])))\n        elif dimension == _ellipsis:\n            dim_composition.extend(identifier_dim_map[_ellipsis])\n        else:\n            raise ValueError(f'Unexpected dimension: {dimension}')\n    return dim_composition",
    "docstring": "Convert a into a index of strings representing first class dims.",
    "type": "function",
    "file_path": "pytorch\\functorch\\einops\\rearrange.py",
    "ast_data": "FunctionDef name:composition_to_dims arg:composition arguments arg For If Call Call Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "unregister",
    "source_code": "def unregister(self, name):\n    if name in self._BUILTIN_COLOR_SEQUENCES:\n        raise ValueError(f'Cannot unregister builtin color sequence {name!r}')\n    self._color_sequences.pop(name, None)",
    "docstring": "Remove a sequence from the registry. You cannot remove built-in color sequences. If the name is not registered, returns with no error.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:unregister arg:self arg:name arguments arg arg If Compare Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_train_batch_begin",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_train_batch_begin(self, batch, logs=None):\n    self.on_batch_begin(batch, logs=logs)",
    "docstring": "Called at the beginning of a training batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of . Typically, the values of the 's metrics are returned. Example: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_train_batch_begin arg:self arg:batch arg:logs arguments arg arg arg Call"
  },
  {
    "library": "sphinx",
    "name": "read",
    "source_code": "@classmethod\ndef read(cls: type[Config], confdir: str | os.PathLike[str], overrides: dict[str, Any] | None=None, tags: Tags | None=None) -> Config:\n    filename = Path(confdir, CONFIG_FILENAME)\n    if not filename.is_file():\n        raise ConfigError(__(\"config directory doesn't contain a conf.py file (%s)\") % confdir)\n    namespace = eval_config_file(filename, tags)\n    if namespace.get('language', ...) is None:\n        logger.warning(__(\"Invalid configuration value found: 'language = None'. Update your configuration to a valid language code. Falling back to 'en' (English).\"))\n        namespace['language'] = 'en'\n    return cls(namespace, overrides)",
    "docstring": "Create a Config object from configuration file.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "FunctionDef name:read arg:cls arg:confdir arg:overrides arg:tags arguments arg arg arg arg Assign Call If Call Raise Call Call Assign Call If Compare Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clone",
    "source_code": "def clone(self) -> 'set_grad_enabled':\n    return self.__class__(self.mode)",
    "docstring": "Create a copy of this class",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\grad_mode.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X, y=None, groups=None):\n    if X is None:\n        raise ValueError(\"The 'X' parameter should not be None.\")\n    return int(comb(_num_samples(X), self.p, exact=True))",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_minor_locator",
    "source_code": "def set_minor_locator(self, locator):\n    _api.check_isinstance(mticker.Locator, locator=locator)\n    self.isDefault_minloc = False\n    self.minor.locator = locator\n    if self.minor.formatter:\n        self.minor.formatter._set_locator(locator)\n    locator.set_axis(self)\n    self.stale = True",
    "docstring": "Set the locator of the minor ticker. Parameters ---------- locator :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:set_minor_locator arg:self arg:locator arguments arg arg Call Assign Assign If Call Call Assign"
  },
  {
    "library": "django",
    "name": "_sort_migrations",
    "source_code": "def _sort_migrations(self):\n    for app_label, ops in sorted(self.generated_operations.items()):\n        ts = TopologicalSorter()\n        for op in ops:\n            ts.add(op)\n            for dep in op._auto_deps:\n                dep = self._resolve_dependency(dep)[0]\n                if dep.app_label != app_label:\n                    continue\n                ts.add(op, *(x for x in ops if self.check_dependency(x, dep)))\n        self.generated_operations[app_label] = list(ts.static_order())",
    "docstring": "Reorder to make things possible. Reordering may be needed so FKs work nicely inside the same app.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:_sort_migrations arg:self arguments arg For Call Call Assign Call For Call For Assign Call If Compare Call Call Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxDomains",
    "source_code": "class SphinxDomains(SphinxTransform):\n    default_priority = 850\n\n    def apply(self, **kwargs: Any) -> None:\n        self.env.domains._process_doc(self.env, self.env.docname, self.document)",
    "docstring": "Collect objects to Sphinx domains for cross references.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\references.py",
    "ast_data": "ClassDef name:SphinxDomains Assign FunctionDef name:apply arg:self arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "_PositiveSemidefinite",
    "source_code": "class _PositiveSemidefinite(_Symmetric):\n\n    def check(self, value):\n        sym_check = super().check(value)\n        if not sym_check.all():\n            return sym_check\n        return torch.linalg.eigvalsh(value).ge(0).all(-1)",
    "docstring": "Constrain to positive-semidefinite matrices.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_PositiveSemidefinite FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Call If Call Return return:yes Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_label",
    "source_code": "def get_label(self):\n    return self.label",
    "docstring": "[*Discouraged*] Return the axis label as a Text instance. .. admonition:: Discouraged This overrides , which is for legend labels, with a new semantic. It is recommended to use the attribute `` instead.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_label arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "converted_function_names",
    "source_code": "@property\ndef converted_function_names(self):\n    if self._converted_function_names is None:\n        parsed_names = []\n        for name in self.functions:\n            elements = name.rsplit('_', 1)\n            if len(elements) == 2 and elements[1].isnumeric():\n                parsed_names.append((int(elements[1]), elements[0], name))\n            else:\n                parsed_names.append((-1, name, name))\n        self._converted_function_names = {name: '{}_frozen_{}'.format(base_name, ops.uid()) for _, base_name, name in sorted(parsed_names)}\n    return self._converted_function_names",
    "docstring": "Map from original to new function names. In order to avoid conflicts (two functions with the same name, one converted and one not), we need to change the name of every converted function to something that is hopefully unique. Returns: Map from original to new suggested function names.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:converted_function_names arg:self arguments arg If Compare Assign For Assign Call If BoolOp Compare Call Call Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "binary_crossentropy",
    "source_code": "@dispatch.add_dispatch_support\ndef binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0, axis=-1):\n    y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n    y_true = math_ops.cast(y_true, y_pred.dtype)\n    label_smoothing = tensor_conversion.convert_to_tensor_v2_with_dispatch(label_smoothing, dtype=backend.floatx())\n\n    def _smooth_labels():\n        return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing\n    y_true = smart_cond.smart_cond(label_smoothing, _smooth_labels, lambda: y_true)\n    return backend.mean(backend.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=axis)",
    "docstring": "Computes the binary crossentropy loss. Standalone usage: >>> y_true = [[0, 1], [0, 0]] >>> y_pred = [[0.6, 0.4], [0.4, 0.6]] >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred) >>> assert loss.shape == (2,) >>> loss.numpy() array([0.916 , 0.714], dtype=float32) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . from_logits: Whether is expected to be a logits tensor. By default, we assume that encodes a probability distribution. label_smoothing: Float in [0, 1]. If > then smooth the labels by squeezing them towards 0.5 That is, using for the target class and for the non-target class. axis: The axis along which the mean is computed. Defaults to -1. Returns: Binary crossentropy loss value. shape = .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:binary_crossentropy arg:y_true arg:y_pred arg:from_logits arg:label_smoothing arg:axis arguments arg arg arg arg arg Assign Call Assign Call Assign Call Call FunctionDef name:_smooth_labels arguments Return return:yes Assign Call arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "call_method",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef call_method(self, target: 'Target', args: tuple[Argument, ...], kwargs: dict[str, Any]) -> Any:\n    self_obj, *args_tail = args\n    assert isinstance(target, str)\n    return getattr(self_obj, target)(*args_tail, **kwargs)",
    "docstring": "Execute a `Node `__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return Any: The value returned by the method invocation",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\interpreter.py",
    "ast_data": "FunctionDef name:call_method arg:self arg:target arg:args arg:kwargs arguments arg arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "iter_sequence_infinite",
    "source_code": "def iter_sequence_infinite(seq):\n    while True:\n        for item in seq:\n            yield item",
    "docstring": "Iterates indefinitely over a Sequence. Args: seq: instance. Yields: Batches of data from the .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:iter_sequence_infinite arg:seq arguments arg While For"
  },
  {
    "library": "sphinx",
    "name": "SphinxLogRecordTranslator",
    "source_code": "class SphinxLogRecordTranslator(logging.Filter):\n    LogRecordClass: type[logging.LogRecord]\n\n    def __init__(self, app: Sphinx) -> None:\n        self.app = app\n        super().__init__()\n\n    def filter(self, record: SphinxWarningLogRecord) -> bool:\n        if isinstance(record, logging.LogRecord):\n            record.__class__ = self.LogRecordClass\n        location = getattr(record, 'location', None)\n        if isinstance(location, tuple):\n            docname, lineno = location\n            if docname:\n                if lineno:\n                    record.location = f'{self.app.env.doc2path(docname)}:{lineno}'\n                else:\n                    record.location = f'{self.app.env.doc2path(docname)}'\n            else:\n                record.location = None\n        elif isinstance(location, nodes.Node):\n            record.location = get_node_location(location)\n        elif location and ':' not in location:\n            record.location = f'{self.app.env.doc2path(location)}'\n        return True",
    "docstring": "Converts a log record to one Sphinx expects * Make a instance of SphinxLogRecord * docname to path if location given * append warning type/subtype to message if :confval: is ``",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:SphinxLogRecordTranslator FunctionDef name:__init__ arg:self arg:app arguments arg arg Assign Call Call FunctionDef name:filter arg:self arg:record arguments arg arg If Call Assign Assign Call If Call Assign If If Assign Call Assign Call Assign If Call Assign Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_plot_single_rug",
    "source_code": "def _plot_single_rug(self, sub_data, var, height, ax, kws):\n    vector = sub_data[var]\n    n = len(vector)\n    _, inv = _get_transform_functions(ax, var)\n    vector = inv(vector)\n    if 'hue' in self.variables:\n        colors = self._hue_map(sub_data['hue'])\n    else:\n        colors = None\n    if var == 'x':\n        trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n        xy_pairs = np.column_stack([np.repeat(vector, 2), np.tile([0, height], n)])\n    if var == 'y':\n        trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n        xy_pairs = np.column_stack([np.tile([0, height], n), np.repeat(vector, 2)])\n    line_segs = xy_pairs.reshape([n, 2, 2])\n    ax.add_collection(LineCollection(line_segs, transform=trans, colors=colors, **kws))\n    ax.autoscale_view(scalex=var == 'x', scaley=var == 'y')",
    "docstring": "Draw a rugplot along one axis of the plot.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_plot_single_rug arg:self arg:sub_data arg:var arg:height arg:ax arg:kws arguments arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call If Compare Assign Call Assign If Compare Assign Call Assign Call Call Call If Compare Assign Call Assign Call Call Call Assign Call Call Call Call Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "on_predict_begin",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_predict_begin(self, logs=None):\n    pass",
    "docstring": "Called at the beginning of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_begin arg:self arg:logs arguments arg arg"
  },
  {
    "library": "pygame",
    "name": "get_linesize",
    "source_code": "def get_linesize(self):\n    return self.get_sized_height()",
    "docstring": "get_linesize() -> int get the line space of the font text",
    "type": "method",
    "file_path": "pygame\\src_py\\ftfont.py",
    "ast_data": "FunctionDef name:get_linesize arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_extract_symbolized_tb",
    "source_code": "def _extract_symbolized_tb(tb, skip):\n    stack = traceback.StackSummary()\n    for f in reversed(tb[skip:]):\n        stack.append(traceback.FrameSummary(f['filename'], f['line'], f['name']))\n    return stack",
    "docstring": "Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of pre-processed stack trace entries.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_traceback.py",
    "ast_data": "FunctionDef name:_extract_symbolized_tb arg:tb arg:skip arguments arg arg Assign Call For Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, *args, **kwargs):\n    self.args = args\n    self.kwargs = kwargs\n    self.ready = False",
    "docstring": "Initialize the SCGI server parameters.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg Assign Assign Assign"
  },
  {
    "library": "django",
    "name": "get_template",
    "source_code": "def get_template(self, template_name):\n    template, origin = self.find_template(template_name)\n    if not hasattr(template, 'render'):\n        template = Template(template, origin, template_name, engine=self)\n    return template",
    "docstring": "Return a compiled Template object for the given template name, handling template inheritance recursively.",
    "type": "method",
    "file_path": "django\\django\\template\\engine.py",
    "ast_data": "FunctionDef name:get_template arg:self arg:template_name arguments arg arg Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "do_not_convert",
    "source_code": "@tf_export('autograph.experimental.do_not_convert')\ndef do_not_convert(func=None):\n    if func is None:\n        return do_not_convert\n\n    def wrapper(*args, **kwargs):\n        with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):\n            return func(*args, **kwargs)\n    if inspect.isfunction(func) or inspect.ismethod(func):\n        wrapper = functools.update_wrapper(wrapper, func)\n    return autograph_artifact(wrapper)",
    "docstring": "Decorator that suppresses the conversion of a function. Args: func: function to decorate. Returns: If is not None, returns a which is equivalent to , but is not converted by AutoGraph. If is None, returns a decorator that, when invoked with a single argument, returns a equivalent to the above case.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py",
    "ast_data": "FunctionDef name:do_not_convert arg:func arguments arg If Compare Return return:yes FunctionDef name:wrapper arguments arg arg With Call Return return:yes Call If BoolOp Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "one_hot",
    "source_code": "def one_hot(labels: Tensor, num_classes: int, device: torch.device, dtype: torch.dtype, eps: float=1e-06) -> Tensor:\n    if not isinstance(labels, Tensor):\n        raise TypeError(f'Input labels type is not a Tensor. Got {type(labels)}')\n    if not labels.dtype == torch.int64:\n        raise ValueError(f'labels must be of the same dtype torch.int64. Got: {labels.dtype}')\n    if num_classes < 1:\n        raise ValueError(f'The number of classes must be bigger than one. Got: {num_classes}')\n    shape = labels.shape\n    one_hot = zeros((shape[0], num_classes) + shape[1:], device=device, dtype=dtype)\n    return one_hot.scatter_(1, labels.unsqueeze(1), 1.0) + eps",
    "docstring": "Convert an integer label x-D tensor to a one-hot (x+1)-D tensor. Args: labels: tensor with labels of shape :math:, where N is batch size. Each value is an integer representing correct classification. num_classes: number of classes in labels. device: the desired device of returned tensor. dtype: the desired data type of returned tensor. eps: epsilon for numerical stability. Returns: the labels in one hot tensor of shape :math:, Examples: >>> labels = torch.LongTensor([[[0, 1], [2, 0]]]) >>> one_hot(labels, num_classes=3, device=torch.device('cpu'), dtype=torch.int64) tensor([[[[1.0000e+00, 1.0000e-06], [1.0000e-06, 1.0000e+00]], [[1.0000e-06, 1.0000e+00], [1.0000e-06, 1.0000e-06]], [[1.0000e-06, 1.0000e-06], [1.0000e+00, 1.0000e-06]]]])",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\one_hot.py",
    "ast_data": "FunctionDef name:one_hot arg:labels arg:num_classes arg:device arg:dtype arg:eps arguments arg arg arg arg arg If Call Raise Call Call If Compare Raise Call If Compare Raise Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_accelerators",
    "source_code": "def num_accelerators(self, task_type=None, task_id=None, config_proto=None):\n    master = self.master(task_type, task_id)\n    devices = get_accelerator_devices(master, config_proto)\n    mapping = collections.defaultdict(int)\n    for device in devices:\n        if task_type is not None and task_id is not None:\n            job_path = '/job:%s' % task_type\n            task_path = '/task:%s' % task_id\n            if job_path not in device.name or task_path not in device.name:\n                continue\n        mapping[device.device_type] += 1\n    return mapping",
    "docstring": "Returns the number of accelerator cores per worker. This returns the number of accelerator cores (such as GPUs and TPUs) available per worker. Optionally, we allow callers to specify the task_type, and task_id, for if they want to target a specific TensorFlow task to query the number of accelerators. This is to support heterogenous environments, where the number of accelerators cores per host is different. Args: task_type: (Optional) The type of the TensorFlow task of the machine we want to query. task_id: (Optional) The index of the TensorFlow task of the machine we want to query. config_proto: (Optional) Configuration for starting a new session to query how many accelerator cores it has. Returns: A map of accelerator types to number of cores.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:num_accelerators arg:self arg:task_type arg:task_id arg:config_proto arguments arg arg arg arg Assign Call Assign Call Assign Call For If BoolOp Compare Compare Assign Assign If BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "as_control_dep",
    "source_code": "def as_control_dep(name: str) -> str:\n    return '^' + name.split(':')[0]",
    "docstring": "Returns the input as a control dependency.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:as_control_dep arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_func_graphs",
    "source_code": "def get_func_graphs(op):\n\n    def _get_func_graph_for_branch(name_attr_list, cached_attr_name=None):\n        func_graph = None\n        if cached_attr_name is not None:\n            func_graph = getattr(op, cached_attr_name, None)\n        inputs = op.inputs[1:]\n        if func_graph is None:\n            input_shapes = [t.shape for t in inputs]\n            func_graph = util.get_func_graph(op, input_shapes, name_attr_list.name)\n        for external_t, internal_t in zip(inputs, func_graph.inputs):\n            handle_data_util.copy_handle_data(external_t, internal_t)\n        func_graph.function_captures.reset_captures(inputs, func_graph.inputs)\n        func_graph._forward_cond = op\n        return func_graph\n    if op.type in ['If', 'StatelessIf']:\n        return (_get_func_graph_for_branch(op.get_attr('then_branch'), '_true_graph'), _get_func_graph_for_branch(op.get_attr('else_branch'), '_false_graph'))\n    elif op.type in ['Case', 'StatelessCase']:\n        return [_get_func_graph_for_branch(branch_fn, '_branch_graph_{}'.format(i)) for i, branch_fn in enumerate(op.get_attr('branches'))]\n    else:\n        raise ValueError('Unsupported op type: {}'.format(op.type))",
    "docstring": "Returns s for the input op branches. Args: op: The If or Case Operation. Returns: A tuple of the s of the then_branch and else_branch (all branches for Case).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:get_func_graphs arg:op arguments arg FunctionDef name:_get_func_graph_for_branch arg:name_attr_list arg:cached_attr_name arguments arg arg Assign If Compare Assign Call Assign If Compare Assign Assign Call For Call Call Call Assign Return return:yes If Compare Return return:yes Call Call Call Call If Compare Return return:yes Call Call Call Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_escape_and_apply_props",
    "source_code": "def _escape_and_apply_props(s, prop):\n    commands = []\n    families = {'serif': '\\\\rmfamily', 'sans': '\\\\sffamily', 'sans-serif': '\\\\sffamily', 'monospace': '\\\\ttfamily'}\n    family = prop.get_family()[0]\n    if family in families:\n        commands.append(families[family])\n    elif not mpl.rcParams['pgf.rcfonts']:\n        commands.append('\\\\fontfamily{\\\\familydefault}')\n    elif any((font.name == family for font in fm.fontManager.ttflist)):\n        commands.append('\\\\ifdefined\\\\pdftexversion\\\\else\\\\setmainfont{%s}\\\\rmfamily\\\\fi' % family)\n    else:\n        _log.warning('Ignoring unknown font: %s', family)\n    size = prop.get_size_in_points()\n    commands.append('\\\\fontsize{%f}{%f}' % (size, size * 1.2))\n    styles = {'normal': '', 'italic': '\\\\itshape', 'oblique': '\\\\slshape'}\n    commands.append(styles[prop.get_style()])\n    boldstyles = ['semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black']\n    if prop.get_weight() in boldstyles:\n        commands.append('\\\\bfseries')\n    commands.append('\\\\selectfont')\n    return '{' + ''.join(commands) + '\\\\catcode`\\\\^=\\\\active\\\\def^{\\\\ifmmode\\\\sp\\\\else\\\\^{}\\\\fi}' + '\\\\catcode`\\\\%=\\\\active\\\\def%{\\\\%}' + _tex_escape(s) + '}'",
    "docstring": "Generate a TeX string that renders string *s* with font properties *prop*, also applying any required escapes to *s*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py",
    "ast_data": "FunctionDef name:_escape_and_apply_props arg:s arg:prop arguments arg arg Assign Assign Assign Call If Compare Call If Call If Call Compare Call Call Assign Call Call Assign Call Call Assign If Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "aten_add_complex",
    "source_code": "@onnx_impl((aten.add.Tensor, aten.add.Scalar), trace_only=True, complex=True)\ndef aten_add_complex(self: TReal, other: TReal, alpha: float=1.0) -> TReal:\n    return aten_add(self, other, alpha=alpha)",
    "docstring": "add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_torchlib\\ops\\core.py",
    "ast_data": "FunctionDef name:aten_add_complex arg:self arg:other arg:alpha arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_slot",
    "source_code": "def get_slot(self, var, name):\n    named_slots = self._slots.get(name, None)\n    if not named_slots:\n        return None\n    slot = named_slots.get(_var_key(var), None)\n    if distribute_utils.is_distributed_variable(slot) and (not distribute_utils.is_distributed_variable(var)):\n        slot = slot._get_on_device_or_primary()\n    return slot",
    "docstring": "Return a slot named created for by the Optimizer. Some subclasses use additional variables. For example and use variables to accumulate updates. This method gives access to these objects if for some reason you need them. Use to get the list of slot names created by the . Args: var: A variable passed to or . name: A string. Returns: The for the slot if it was created, otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:get_slot arg:self arg:var arg:name arguments arg arg arg Assign Call If Return return:no Assign Call Call If BoolOp Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_ordering",
    "source_code": "def _check_ordering(self, obj):\n    if obj.ordering is None:\n        return []\n    elif not isinstance(obj.ordering, (list, tuple)):\n        return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')\n    else:\n        return list(chain.from_iterable((self._check_ordering_item(obj, field_name, 'ordering[%d]' % index) for index, field_name in enumerate(obj.ordering))))",
    "docstring": "Check that ordering refers to existing fields or is random.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_ordering arg:self arg:obj arguments arg arg If Compare Return return:no If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "is_keyword",
    "source_code": "def is_keyword(self, *names):\n    return False",
    "docstring": "Is this a name token with one of the names?",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_type1font.py",
    "ast_data": "FunctionDef name:is_keyword arg:self arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "to_python",
    "source_code": "def to_python(self, value):\n    if value not in self.empty_values:\n        value = str(value)\n        if self.strip:\n            value = value.strip()\n    if value in self.empty_values:\n        return self.empty_value\n    return value",
    "docstring": "Return a string.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:to_python arg:self arg:value arguments arg arg If Compare Assign Call If Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, ndivide=1, pad=None, **kwargs):\n    self._ndivide = ndivide\n    self._pad = pad\n    super().__init__(**kwargs)",
    "docstring": "Parameters ---------- ndivide : int or None, default: 1 The number of sections to divide the legend area into. If None, use the length of the input tuple. pad : float, default: :rc: Padding in units of fraction of font size. **kwargs Keyword arguments forwarded to .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ndivide arg:pad arguments arg arg arg arg Assign Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_raise_for_unsupported_routing",
    "source_code": "def _raise_for_unsupported_routing(obj, method, **kwargs):\n    kwargs = {key: value for key, value in kwargs.items() if value is not None}\n    if _routing_enabled() and kwargs:\n        cls_name = obj.__class__.__name__\n        raise NotImplementedError(f'{cls_name}.{method} cannot accept given metadata ({set(kwargs.keys())}) since metadata routing is not yet implemented for {cls_name}.')",
    "docstring": "Raise when metadata routing is enabled and metadata is passed. This is used in meta-estimators which have not implemented metadata routing to prevent silent bugs. There is no need to use this function if the meta-estimator is not accepting any metadata, especially in , since if a meta-estimator accepts any metadata, they would do that in as well. Parameters ---------- obj : estimator The estimator for which we're raising the error. method : str The method where the error is raised. **kwargs : dict The metadata passed to the method.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_raise_for_unsupported_routing arg:obj arg:method arguments arg arg arg Assign Call Compare If BoolOp Call Assign Raise Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_mask_to_limits",
    "source_code": "def _mask_to_limits(a, limits, inclusive):\n    lower_limit, upper_limit = limits\n    lower_include, upper_include = inclusive\n    am = ma.MaskedArray(a)\n    if lower_limit is not None:\n        if lower_include:\n            am = ma.masked_less(am, lower_limit)\n        else:\n            am = ma.masked_less_equal(am, lower_limit)\n    if upper_limit is not None:\n        if upper_include:\n            am = ma.masked_greater(am, upper_limit)\n        else:\n            am = ma.masked_greater_equal(am, upper_limit)\n    if am.count() == 0:\n        raise ValueError('No array values within given limits')\n    return am",
    "docstring": "Mask an array for values outside of given limits. This is primarily a utility function. Parameters ---------- a : array limits : (float or None, float or None) A tuple consisting of the (lower limit, upper limit). Values in the input array less than the lower limit or greater than the upper limit will be masked out. None implies no limit. inclusive : (bool, bool) A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to lower or upper are allowed. Returns ------- A MaskedArray. Raises ------ A ValueError if there are no values within the given limits.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:_mask_to_limits arg:a arg:limits arg:inclusive arguments arg arg arg Assign Assign Assign Call If Compare If Assign Call Assign Call If Compare If Assign Call Assign Call If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "replace_inplace",
    "source_code": "def replace_inplace(directory, search, to_replace) -> None:\n    for root, _, files in os.walk(directory):\n        for file_name in files:\n            if file_name.endswith('.py'):\n                file_path = os.path.join(root, file_name)\n                with open(file_path, 'r', encoding='utf-8') as file:\n                    filedata = file.read()\n                if search in filedata:\n                    filedata = filedata.replace(search, to_replace)\n                    with open(file_path, 'w') as file:\n                        file.write(filedata)",
    "docstring": "Traverse the directory and replace search phrase in each file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\utils\\utils.py",
    "ast_data": "FunctionDef name:replace_inplace arg:directory arg:search arg:to_replace arguments arg arg arg For Call For If Call Assign Call With Call Assign Call If Compare Assign Call With Call Call"
  },
  {
    "library": "pytorch",
    "name": "mark_dirty",
    "source_code": "def mark_dirty(self, *args: Any, **kwargs: Any) -> None:\n    self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))",
    "docstring": "See :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\function.py",
    "ast_data": "FunctionDef name:mark_dirty arg:self arguments arg arg arg Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_shallow_structure",
    "source_code": "def assert_shallow_structure(modality, shallow_tree, input_tree, check_types=True, expand_composites=False):\n    if modality == Modality.CORE:\n        _tf_core_assert_shallow_structure(shallow_tree, input_tree, check_types, expand_composites)\n    elif modality == Modality.DATA:\n        _tf_data_assert_shallow_structure(shallow_tree, input_tree, check_types)\n    else:\n        raise ValueError('Unknown modality used {} for nested structure'.format(modality))",
    "docstring": "Asserts that is a shallow structure of . This function tests if the structure can be created from the structure by replacing its leaf nodes with deeper tree structures. Examples: The following code will raise an exception: The following code will raise an exception: Args: modality: enum value of supported modality [Modality.CORE or Modality.DATA] shallow_tree: an arbitrarily nested structure. input_tree: an arbitrarily nested structure. check_types: if (default) the sequence types of and have to be the same. Note that even with check_types==True, this function will consider two different namedtuple classes with the same name and _fields attribute to be the same class. expand_composites: Valid for Modality.CORE only. If true, then composite tensors such as and are expanded into their component tensors. Raises: TypeError: If is a sequence but is not. TypeError: If the sequence types of are different from . Only raised if is . ValueError: If the sequence lengths of are different from .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:assert_shallow_structure arg:modality arg:shallow_tree arg:input_tree arg:check_types arg:expand_composites arguments arg arg arg arg arg If Compare Call If Compare Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "send_tpu_embedding_gradients",
    "source_code": "def send_tpu_embedding_gradients(inputs, config, learning_rates=None, name=None):\n    if learning_rates is None:\n        learning_rates = []\n    return gen_tpu_ops.send_tpu_embedding_gradients(inputs=inputs, learning_rates=learning_rates, config=config, name=name)",
    "docstring": "A placeholder op for feeding per-sample gradients to the embedding layer. Args: inputs: A TensorList of gradients with which to update embedding tables. This argument has the same length and shapes as the return value of RecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizers specified in the TPU embedding configuration given to tpu.initialize_system. config: Serialized TPUEmbeddingConfiguration proto. learning_rates: A TensorList of float32 scalars, one for each dynamic learning rate tag: see the comments in //third_party/tensorflow/core/protobuf/tpu/ optimization_parameters.proto. Multiple tables can share the same dynamic learning rate tag as specified in the configuration. If the learning rates for all tables are constant, this list should be empty. name: A name for the operation (optional). Returns: A SendTPUEmbeddingGradients operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\ops\\tpu_ops.py",
    "ast_data": "FunctionDef name:send_tpu_embedding_gradients arg:inputs arg:config arg:learning_rates arg:name arguments arg arg arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "_fast_get_system_executable",
    "source_code": "def _fast_get_system_executable(self):\n    if self.real_prefix or (self.base_prefix is not None and self.base_prefix != self.prefix):\n        if self.real_prefix is None:\n            base_executable = getattr(sys, '_base_executable', None)\n            if base_executable is not None:\n                if sys.executable != base_executable:\n                    if os.path.exists(base_executable):\n                        return base_executable\n                    major, minor = (self.version_info.major, self.version_info.minor)\n                    if self.os == 'posix' and (major, minor) >= (3, 11):\n                        base_dir = os.path.dirname(base_executable)\n                        for base_executable in [os.path.join(base_dir, exe) for exe in (f'python{major}', f'python{major}.{minor}')]:\n                            if os.path.exists(base_executable):\n                                return base_executable\n        return None\n    return self.original_executable",
    "docstring": "Try to get the system executable by just looking at properties.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_info.py",
    "ast_data": "FunctionDef name:_fast_get_system_executable arg:self arguments arg If BoolOp BoolOp Compare Compare If Compare Assign Call If Compare If Compare If Call Return return:yes Assign If BoolOp Compare Compare Assign Call For Call If Call Return return:yes Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "aot_compile_cpu",
    "source_code": "def aot_compile_cpu():\n    checkpoint_path = _SMCLI_CHECKPOINT_PATH.value or os.path.join(_SMCLI_DIR.value, 'variables/variables')\n    if not _SMCLI_VARIABLES_TO_FEED.value:\n        variables_to_feed = []\n    elif _SMCLI_VARIABLES_TO_FEED.value.lower() == 'all':\n        variables_to_feed = None\n    else:\n        variables_to_feed = _SMCLI_VARIABLES_TO_FEED.value.split(',')\n    saved_model_aot_compile.aot_compile_cpu_meta_graph_def(checkpoint_path=checkpoint_path, meta_graph_def=saved_model_utils.get_meta_graph_def(_SMCLI_DIR.value, _SMCLI_TAG_SET.value), signature_def_key=_SMCLI_SIGNATURE_DEF_KEY.value, variables_to_feed=variables_to_feed, output_prefix=_SMCLI_OUTPUT_PREFIX.value, target_triple=_SMCLI_TARGET_TRIPLE.value, target_cpu=_SMCLI_TARGET_CPU.value, cpp_class=_SMCLI_CPP_CLASS.value, multithreading=_SMCLI_MULTITHREADING.value.lower() not in ('f', 'false', '0'))",
    "docstring": "Function triggered by aot_compile_cpu command.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:aot_compile_cpu arguments Assign BoolOp Call If Assign If Compare Call Assign Assign Call Call Call Compare Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self, environ, start_response):\n    domain = environ.get('HTTP_HOST', '')\n    if self.use_x_forwarded_host:\n        domain = environ.get('HTTP_X_FORWARDED_HOST', domain)\n    nextapp = self.domains.get(domain)\n    if nextapp is None:\n        nextapp = self.default\n    return nextapp(environ, start_response)",
    "docstring": "Route WSGI requests based on host names.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpwsgi.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:environ arg:start_response arguments arg arg arg Assign Call If Assign Call Assign Call If Compare Assign Return return:yes Call"
  },
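The host-routing `__call__` above is easy to exercise outside CherryPy. Below is a minimal sketch of the same dispatch pattern built from plain WSGI callables; the `HostDispatcher` class and the fake environ are illustrative stand-ins, not CherryPy's API.

```python
# Minimal sketch of host-based WSGI dispatch (illustrative; not CherryPy's API).
def make_app(body):
    def app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [body]
    return app

class HostDispatcher:
    def __init__(self, domains, default):
        self.domains = domains  # maps host name -> WSGI app
        self.default = default  # fallback when no host matches

    def __call__(self, environ, start_response):
        domain = environ.get("HTTP_HOST", "")
        nextapp = self.domains.get(domain, self.default)
        return nextapp(environ, start_response)

dispatcher = HostDispatcher({"api.example.com": make_app(b"api")},
                            default=make_app(b"default"))
print(dispatcher({"HTTP_HOST": "api.example.com"}, lambda s, h: None))  # [b'api']
```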
  {
    "library": "numpy",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    with np.errstate(invalid='ignore'):\n        return umath.logical_or(umath.greater(x, self.b), umath.less(x, self.a))",
    "docstring": "Execute the call behavior.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, y):\n    if self.neg_label >= self.pos_label:\n        raise ValueError(f'neg_label={self.neg_label} must be strictly less than pos_label={self.pos_label}.')\n    if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):\n        raise ValueError(f'Sparse binarization is only supported with non zero pos_label and zero neg_label, got pos_label={self.pos_label} and neg_label={self.neg_label}')\n    self.y_type_ = type_of_target(y, input_name='y')\n    if 'multioutput' in self.y_type_:\n        raise ValueError('Multioutput target data is not supported with label binarization')\n    if _num_samples(y) == 0:\n        raise ValueError('y has 0 samples: %r' % y)\n    self.sparse_input_ = sp.issparse(y)\n    self.classes_ = unique_labels(y)\n    return self",
    "docstring": "Fit label binarizer. Parameters ---------- y : ndarray of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:fit arg:self arg:y arguments arg arg If Compare Raise Call If BoolOp BoolOp Compare Compare Raise Call Assign Call If Compare Raise Call If Compare Call Raise Call Assign Call Assign Call Return return:yes Call"
  },
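Since `LabelBinarizer` is public scikit-learn API, `fit` can be demonstrated directly; the target values below are arbitrary.

```python
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer(neg_label=0, pos_label=1)
lb.fit([1, 2, 6, 4, 2])
print(lb.classes_)           # [1 2 4 6], the unique labels found by fit
print(lb.transform([1, 6]))  # [[1 0 0 0]
                             #  [0 0 0 1]]
```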
  {
    "library": "pytorch",
    "name": "_force_skip_lazy_graph_module",
    "source_code": "@compatibility(is_backward_compatible=False)\n@contextmanager\ndef _force_skip_lazy_graph_module():\n    try:\n        global _force_skip_lazy_graph_module_flag\n        prior = _force_skip_lazy_graph_module_flag\n        _force_skip_lazy_graph_module_flag = True\n        yield\n    finally:\n        _force_skip_lazy_graph_module_flag = prior",
    "docstring": "Skip using lazy graph module disregarding the setting of _use_lazy_graph_module. Use to skip _LazyGraphModule when testing inductor torchscript related backend. torch.jit.script a _LazyGraphModule results in following error:",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_lazy_graph_module.py",
    "ast_data": "FunctionDef name:_force_skip_lazy_graph_module arguments Try Assign Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "mutated",
    "source_code": "def mutated(self):\n    return self.mutatedx() or self.mutatedy()",
    "docstring": "Return whether the bbox has changed since init.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:mutated arg:self arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "pytorch",
    "name": "ParamInfo",
    "source_code": "class ParamInfo(NamedTuple):\n    param_name: str\n    module: nn.Module\n    module_name: str",
    "docstring": "Information for an original parameter.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "ClassDef name:ParamInfo"
  },
  {
    "library": "tensorflow",
    "name": "variables",
    "source_code": "def variables(self):\n    return self._opt.variables()",
    "docstring": "Forwarding the variables from the underlying optimizer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py",
    "ast_data": "FunctionDef name:variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_mask_setitem_value",
    "source_code": "@final\ndef _maybe_mask_setitem_value(self, indexer, value):\n    if isinstance(indexer, tuple) and len(indexer) == 2 and isinstance(value, (ABCSeries, ABCDataFrame)):\n        pi, icols = indexer\n        ndim = value.ndim\n        if com.is_bool_indexer(pi) and len(value) == len(pi):\n            newkey = pi.nonzero()[0]\n            if is_scalar_indexer(icols, self.ndim - 1) and ndim == 1:\n                if len(newkey) == 0:\n                    value = value.iloc[:0]\n                else:\n                    value = self.obj.iloc._align_series(indexer, value)\n                indexer = (newkey, icols)\n            elif isinstance(icols, np.ndarray) and icols.dtype.kind == 'i' and (len(icols) == 1):\n                if ndim == 1:\n                    value = self.obj.iloc._align_series(indexer, value)\n                    indexer = (newkey, icols)\n                elif ndim == 2 and value.shape[1] == 1:\n                    if len(newkey) == 0:\n                        value = value.iloc[:0]\n                    else:\n                        value = self.obj.iloc._align_frame(indexer, value)\n                    indexer = (newkey, icols)\n    elif com.is_bool_indexer(indexer):\n        indexer = indexer.nonzero()[0]\n    return (indexer, value)",
    "docstring": "If we have obj.iloc[mask] = series_or_frame and series_or_frame has the same length as obj, we treat this as obj.iloc[mask] = series_or_frame[mask], similar to Series.__setitem__. Note this is only for loc, not iloc.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_maybe_mask_setitem_value arg:self arg:indexer arg:value arguments arg arg arg If BoolOp Call Compare Call Call Assign Assign If BoolOp Call Compare Call Call Assign Call If BoolOp Call Compare If Compare Call Assign Assign Call Assign If BoolOp Call Compare Compare Call If Compare Assign Call Assign If BoolOp Compare Compare If Compare Call Assign Assign Call Assign If Call Assign Call Return return:yes"
  },
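The helper above implements the documented `.loc` behavior: when the right-hand side has the same length as the object, `obj.loc[mask] = rhs` acts like `obj.loc[mask] = rhs[mask]`. A small sketch of that user-visible behavior:

```python
import pandas as pd

s = pd.Series([10, 20, 30], index=["a", "b", "c"])
mask = pd.Series([True, False, True], index=s.index)
rhs = pd.Series([1, 2, 3], index=s.index)

# rhs has the same length as s, so this behaves like s.loc[mask] = rhs[mask]
s.loc[mask] = rhs
print(s.tolist())  # [1, 20, 3]
```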
  {
    "library": "tensorflow",
    "name": "_maybe_add_warning",
    "source_code": "def _maybe_add_warning(self, node, full_name):\n    function_warnings = self._api_change_spec.function_warnings\n    if full_name in function_warnings:\n        level, message = function_warnings[full_name]\n        message = message.replace('<function name>', full_name)\n        self.add_log(level, node.lineno, node.col_offset, '%s requires manual check. %s' % (full_name, message))\n        return True\n    else:\n        return False",
    "docstring": "Adds an error to be printed about full_name at node.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:_maybe_add_warning arg:self arg:node arg:full_name arguments arg arg arg Assign If Compare Assign Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "refine_star",
    "source_code": "def refine_star(self, v):\n    vnn = copy.copy(v.nn)\n    v1nn = []\n    d_v0v1_set = set()\n    for v1 in vnn:\n        v1nn.append(copy.copy(v1.nn))\n    for v1, v1nn in zip(vnn, v1nn):\n        vnnu = v1nn.intersection(vnn)\n        d_v0v1 = self.split_edge(v.x, v1.x)\n        for o_d_v0v1 in d_v0v1_set:\n            d_v0v1.connect(o_d_v0v1)\n        d_v0v1_set.add(d_v0v1)\n        for v2 in vnnu:\n            d_v1v2 = self.split_edge(v1.x, v2.x)\n            d_v0v1.connect(d_v1v2)\n    return",
    "docstring": "Refine the star domain of a vertex .",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_complex.py",
    "ast_data": "FunctionDef name:refine_star arg:self arg:v arguments arg arg Assign Call Assign Assign Call For Call Call For Call Assign Call Assign Call For Call Call For Assign Call Call Return return:no"
  },
  {
    "library": "django",
    "name": "force_no_ordering",
    "source_code": "def force_no_ordering(self):\n    return [(None, ('NULL', [], False))]",
    "docstring": "\"ORDER BY NULL\" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\mysql\\operations.py",
    "ast_data": "FunctionDef name:force_no_ordering arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_angle",
    "source_code": "def set_angle(self, angle):\n    self._angle = angle\n    self.stale = True",
    "docstring": "Set the angle of the ellipse. Parameters ---------- angle : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_angle arg:self arg:angle arguments arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "is_smaller",
    "source_code": "def is_smaller(s1: PythonSignature, s2: PythonSignature) -> bool:\n    args1, args2 = (s1.arguments(skip_outputs=True), s2.arguments(skip_outputs=True))\n    if len(args1) != len(args2):\n        return False\n    equal = all((arg1.type == arg2.type for arg1, arg2 in zip(args1, args2)))\n    smaller_or_equal = all((str(arg1.type) == str(arg2.type) or is_arg_smaller(arg1.type, arg2.type) for arg1, arg2 in zip(args1, args2)))\n    return smaller_or_equal and (not equal)",
    "docstring": "Returns True if s1 < s2 in the partial order.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:is_smaller arg:s1 arg:s2 arguments arg arg Assign Call Call If Compare Call Call Return return:yes Assign Call Compare Call Assign Call BoolOp Compare Call Call Call Call Return return:yes BoolOp"
  },
  {
    "library": "scipy",
    "name": "success",
    "source_code": "def success(self, x, tol=1e-05):\n    val = self.fun(asarray(x))\n    if abs(val - self.fglob) < tol:\n        return True\n    bounds = np.asarray(self.bounds, dtype=np.float64)\n    if np.any(x > bounds[:, 1]):\n        return False\n    if np.any(x < bounds[:, 0]):\n        return False\n    if val < self.fglob:\n        raise ValueError('Found a lower global minimum', x, val, self.fglob)\n    return False",
    "docstring": "Tests if a candidate solution at the global minimum. The default test is Parameters ---------- x : sequence The candidate vector for testing if the global minimum has been reached. Must have `` tol : float The evaluated function and known global minimum must differ by less than this amount to be at a global minimum. Returns ------- bool : is the candidate vector at the global minimum?",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_benchmark.py",
    "ast_data": "FunctionDef name:success arg:self arg:x arg:tol arguments arg arg arg Assign Call Call If Compare Call Return return:yes Assign Call If Call Compare Return return:yes If Call Compare Return return:yes If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "consume_prefix_in_state_dict_if_present",
    "source_code": "def consume_prefix_in_state_dict_if_present(state_dict: dict[str, Any], prefix: str) -> None:\n    keys = list(state_dict.keys())\n    for key in keys:\n        if key.startswith(prefix):\n            newkey = key[len(prefix):]\n            state_dict[newkey] = state_dict.pop(key)\n    if hasattr(state_dict, '_metadata'):\n        keys = list(state_dict._metadata.keys())\n        for key in keys:\n            if len(key) == 0:\n                continue\n            if key == prefix.replace('.', '') or key.startswith(prefix):\n                newkey = key[len(prefix):]\n                state_dict._metadata[newkey] = state_dict._metadata.pop(key)",
    "docstring": "Strip the prefix in state_dict in place, if any. .. note:: Given a from a DP/DDP model, a local model can load it by applying before calling :meth:. Args: state_dict (OrderedDict): a state-dict to be loaded to the model. prefix (str): prefix.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\utils.py",
    "ast_data": "FunctionDef name:consume_prefix_in_state_dict_if_present arg:state_dict arg:prefix arguments arg arg Assign Call Call For If Call Assign Call Assign Call If Call Assign Call Call For If Compare Call If BoolOp Compare Call Call Assign Call Assign Call"
  },
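A typical use, per the note in the docstring, is stripping the `"module."` prefix that DataParallel/DDP adds so a plain local module can load the checkpoint:

```python
import torch
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present

model = torch.nn.Linear(2, 2)
# Simulate a DDP-style checkpoint whose keys carry a "module." prefix.
state_dict = {"module." + k: v for k, v in model.state_dict().items()}

consume_prefix_in_state_dict_if_present(state_dict, "module.")
print(sorted(state_dict))          # ['bias', 'weight']
model.load_state_dict(state_dict)  # keys now match the local model
```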
  {
    "library": "pytorch",
    "name": "tree_map_",
    "source_code": "def tree_map_(func: Callable[..., Any], tree: PyTree, *rests: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> PyTree:\n    return optree.tree_map_(func, tree, *rests, is_leaf=is_leaf, none_is_leaf=True, namespace='torch')",
    "docstring": "Like :func:, but do an inplace call on each leaf and return the original tree. See also :func:. Args: func (callable): A function that takes `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_map_ arg:func arg:tree arguments arg arg arg arg Return return:yes Call"
  },
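The `_cxx_pytree` version delegates to `optree`; the pure-Python sketch below only illustrates the documented in-place semantics (apply `func` at each leaf for its side effect, return the original tree) over nested dicts and lists, and is not torch's implementation.

```python
import torch

def tree_map_inplace(func, tree):
    # Illustrative sketch: recurse through containers, call func at leaves
    # for its side effect, and hand back the original tree object.
    if isinstance(tree, dict):
        for v in tree.values():
            tree_map_inplace(func, v)
    elif isinstance(tree, (list, tuple)):
        for v in tree:
            tree_map_inplace(func, v)
    else:
        func(tree)  # return value deliberately ignored
    return tree

tree = {"a": torch.ones(2), "b": [torch.zeros(2)]}
tree_map_inplace(lambda t: t.add_(1), tree)
print(tree["a"], tree["b"][0])  # tensor([2., 2.]) tensor([1., 1.])
```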
  {
    "library": "tensorflow",
    "name": "name_from_scope_name",
    "source_code": "def name_from_scope_name(name) -> str:\n    return name[:-1] if name and name[-1] == '/' else name",
    "docstring": "Returns the name of an op given the name of its scope. Args: name: the name of the scope. Returns: the name of the op (equal to scope name minus any trailing slash).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:name_from_scope_name arg:name arguments arg Return return:yes BoolOp Compare"
  },
  {
    "library": "pytorch",
    "name": "Stream",
    "source_code": "class Stream:\n\n    def __init__(self, priority: int=-1) -> None:\n        pass\n\n    def wait_stream(self, stream) -> None:\n        pass\n\n    def record_event(self) -> None:\n        pass\n\n    def wait_event(self, event) -> None:\n        pass",
    "docstring": "N.B. This class only exists to facilitate device-agnostic code",
    "type": "class",
    "file_path": "pytorch\\torch\\cpu\\__init__.py",
    "ast_data": "ClassDef name:Stream FunctionDef name:__init__ arg:self arg:priority arguments arg arg FunctionDef name:wait_stream arg:self arg:stream arguments arg arg FunctionDef name:record_event arg:self arguments arg FunctionDef name:wait_event arg:self arg:event arguments arg arg"
  },
  {
    "library": "pytorch",
    "name": "halide_argdefs",
    "source_code": "def halide_argdefs(self):\n\n    def arg_order(arg_tuple):\n        _call_str, arg = arg_tuple\n        if isinstance(arg, SizeArg):\n            return 1\n        elif 'out_ptr' in arg.name:\n            return 2\n        else:\n            assert 'in_ptr' in arg.name\n            return 0\n    result: list[tuple[Optional[str], KernelArgType]] = []\n    _, a, b, _ = self.args.python_argdefs()\n    for call_str, arg in sorted(zip(a, b), key=arg_order):\n        result.append((call_str, arg))\n        if isinstance(arg, TensorArg):\n            assert arg.offset == 0 and arg.alias_of is None\n            result.extend(((None, TensorArg(alias, arg.buffer, arg.dtype, arg.offset, alias_of=arg.name)) for alias in self.buffer_aliases.get(arg.name, ())))\n    return result",
    "docstring": "Halide requires scalar inputs before outputs, so need to reorder args.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:halide_argdefs arg:self arguments arg FunctionDef name:arg_order arg:arg_tuple arguments arg Assign If Call Return return:yes If Compare Return return:yes Compare Return return:yes Assign Call For Call Call Call If Call BoolOp Compare Compare Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_iset_single",
    "source_code": "def _iset_single(self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block, refs: BlockValuesRefs | None=None) -> None:\n    if inplace and blk.should_store(value):\n        copy = not self._has_no_reference_block(blkno)\n        iloc = self.blklocs[loc]\n        blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)\n        return\n    nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs)\n    old_blocks = self.blocks\n    new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1:]\n    self.blocks = new_blocks\n    return",
    "docstring": "Fastpath for iset when we are only setting a single position and the Block currently in that position is itself single-column. In this case we can swap out the entire Block and blklocs and blknos are unaffected.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:_iset_single arg:self arg:loc arg:value arg:inplace arg:blkno arg:blk arg:refs arguments arg arg arg arg arg arg arg If BoolOp Call Assign Call Assign Call Call Return return:no Assign Call Assign Assign Assign Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "capture",
    "source_code": "def capture(self, tensor, name=None):\n    if tensor.ref() in self._captured:\n        return self._captured[tensor.ref()]\n    elif self._capture_by_value:\n        return self._add_tensor_and_parents(tensor)\n    else:\n        return self._capture_tensor_as_extra_input(tensor, name)",
    "docstring": "Adds the given tensor to this graph and returns the captured tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:capture arg:self arg:tensor arg:name arguments arg arg arg If Compare Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_assert_static",
    "source_code": "def _assert_static(condition, data):\n    if not condition:\n        data_static = [_maybe_constant_value_string(x) for x in data]\n        raise errors.InvalidArgumentError(node_def=None, op=None, message='\\n'.join(data_static))",
    "docstring": "Raises a InvalidArgumentError with as much information as possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_assert_static arg:condition arg:data arguments arg arg If Assign Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "DimSpec",
    "source_code": "@dataclass\nclass DimSpec:\n\n    def inputs(self) -> Iterable['DimSpec']:\n        return ()",
    "docstring": "Specifies how an output dimension maps to an input dimension.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_view_ops.py",
    "ast_data": "ClassDef name:DimSpec FunctionDef name:inputs arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "inplace_sub",
    "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_sub, which offers the same functionality with well-defined read-write semantics.')\ndef inplace_sub(x, i, v):\n    return alias_inplace_sub(gen_array_ops.deep_copy(x), i, v)",
    "docstring": "Applies an inplace sub on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y -= v; If i is a scalar, x has a rank 1 higher than v's. Computes y = x; y[i, :] -= v; Otherwise, x and v must have the same rank. Computes y = x; y[i, :] -= v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. Returns: Returns y, which is guaranteed not to be an alias of x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py",
    "ast_data": "FunctionDef name:inplace_sub arg:x arg:i arg:v arguments arg arg arg Return return:yes Call Call Call"
  },
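The deprecation notice points to `tf.tensor_scatter_nd_sub`, which expresses the same sparse row update with well-defined read-write semantics; a small sketch:

```python
import tensorflow as tf

x = tf.constant([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
# Subtract the update from row 1 only; x itself is left untouched.
y = tf.tensor_scatter_nd_sub(x, indices=[[1]], updates=[[0.5, 0.5]])
print(y.numpy())  # [[1.  1. ] [1.5 1.5] [3.  3. ]]
```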
  {
    "library": "django",
    "name": "lazy_related_operation",
    "source_code": "def lazy_related_operation(function, model, *related_models, **kwargs):\n    models = [model] + [resolve_relation(model, rel) for rel in related_models]\n    model_keys = (make_model_tuple(m) for m in models)\n    apps = model._meta.apps\n    return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)",
    "docstring": "Schedule to be called once and all have been imported and registered with the app registry. will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see for the various forms these may take. Any relative references will be resolved relative to . This is a convenience wrapper for - the app registry model used is the one found in .",
    "type": "function",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:lazy_related_operation arg:function arg:model arguments arg arg arg arg Assign Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, root, script_name='', config=None):\n    self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)\n    self.root = root\n    self.script_name = script_name\n    self.wsgiapp = _cpwsgi.CPWSGIApp(self)\n    self.namespaces = self.namespaces.copy()\n    self.namespaces['log'] = lambda k, v: setattr(self.log, k, v)\n    self.namespaces['wsgi'] = self.wsgiapp.namespace_handler\n    self.config = self.__class__.config.copy()\n    if config:\n        self.merge(config)",
    "docstring": "Initialize Application with given root.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptree.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:root arg:script_name arg:config arguments arg arg arg arg Assign Call Call Assign Assign Assign Call Assign Call Assign arguments arg arg Call Assign Assign Call If Call"
  },
  {
    "library": "scipy",
    "name": "TemplateError",
    "source_code": "class TemplateError(Exception):\n\n    def __init__(self, message, position, name=None):\n        Exception.__init__(self, message)\n        self.position = position\n        self.name = name\n\n    def __str__(self):\n        msg = ' '.join(self.args)\n        if self.position:\n            msg = '%s at line %s column %s' % (msg, self.position[0], self.position[1])\n        if self.name:\n            msg += ' in %s' % self.name\n        return msg",
    "docstring": "Exception raised while parsing a template",
    "type": "class",
    "file_path": "scipy\\scipy\\_build_utils\\tempita\\_tempita.py",
    "ast_data": "ClassDef name:TemplateError FunctionDef name:__init__ arg:self arg:message arg:position arg:name arguments arg arg arg arg Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Call If Assign If Return return:yes"
  },
  {
    "library": "django",
    "name": "_unregister_instance_lookup",
    "source_code": "def _unregister_instance_lookup(self, lookup, lookup_name=None):\n    if lookup_name is None:\n        lookup_name = lookup.lookup_name\n    del self.instance_lookups[lookup_name]",
    "docstring": "Remove given lookup from instance lookups. For use in tests only as it's not thread-safe.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query_utils.py",
    "ast_data": "FunctionDef name:_unregister_instance_lookup arg:self arg:lookup arg:lookup_name arguments arg arg arg If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "PredictOutput",
    "source_code": "class PredictOutput(ExportOutput):\n    _SINGLE_OUTPUT_DEFAULT_NAME = 'output'\n\n    def __init__(self, outputs):\n        self._outputs = self._wrap_and_check_outputs(outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')\n\n    @property\n    def outputs(self):\n        return self._outputs\n\n    def as_signature_def(self, receiver_tensors):\n        return signature_def_utils.predict_signature_def(receiver_tensors, self.outputs)",
    "docstring": "Represents the output of a generic prediction head. A generic prediction need not be either a classification or a regression. Named outputs must be provided as a dict from string to ,",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "ClassDef name:PredictOutput Assign FunctionDef name:__init__ arg:self arg:outputs arguments arg arg Assign Call FunctionDef name:outputs arg:self arguments arg Return return:yes FunctionDef name:as_signature_def arg:self arg:receiver_tensors arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "as_key",
    "source_code": "def as_key(self, is_private=False):\n    if is_private:\n        return self.get_private_key()\n    return self.get_public_key()",
    "docstring": "Represent this key as raw key.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\asymmetric_key.py",
    "ast_data": "FunctionDef name:as_key arg:self arg:is_private arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "device_stack_has_callable",
    "source_code": "def device_stack_has_callable(device_stack):\n    return any((callable(spec._device_name_or_function) for spec in device_stack.peek_objs()))",
    "docstring": "Checks whether a device stack contains a callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:device_stack_has_callable arg:device_stack arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "lookups",
    "source_code": "def lookups(self, request, model_admin):\n    raise NotImplementedError('The SimpleListFilter.lookups() method must be overridden to return a list of tuples (value, verbose value).')",
    "docstring": "Must be overridden to return a list of tuples (value, verbose value)",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\filters.py",
    "ast_data": "FunctionDef name:lookups arg:self arg:request arg:model_admin arguments arg arg arg Raise Call"
  },
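Overriding `lookups()` (together with `queryset()`) is the standard way to use `SimpleListFilter`; the model field `year` below is a hypothetical example.

```python
from django.contrib import admin

class DecadeFilter(admin.SimpleListFilter):
    title = "decade"           # label shown in the admin sidebar
    parameter_name = "decade"  # key used in the query string

    def lookups(self, request, model_admin):
        # The required list of (value, verbose value) tuples.
        return [("80s", "the 1980s"), ("90s", "the 1990s")]

    def queryset(self, request, queryset):
        if self.value() == "80s":
            return queryset.filter(year__gte=1980, year__lte=1989)
        if self.value() == "90s":
            return queryset.filter(year__gte=1990, year__lte=1999)
        return queryset
```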
  {
    "library": "scipy",
    "name": "_getrow",
    "source_code": "def _getrow(self, i):\n    if self.ndim == 1:\n        if i not in (0, -1):\n            raise IndexError(f'index ({i}) out of range')\n        return self.reshape((1, self.shape[0]), copy=True)\n    M, N = self.shape\n    i = int(i)\n    if i < 0:\n        i += M\n    if i < 0 or i >= M:\n        raise IndexError(f'index ({i}) out of range')\n    indptr, indices, data = get_csr_submatrix(M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N)\n    return self.__class__((data, indices, indptr), shape=(1, N), dtype=self.dtype, copy=False)",
    "docstring": "Returns a copy of row i of the matrix, as a (1 x n) CSR matrix (row vector).",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_csr.py",
    "ast_data": "FunctionDef name:_getrow arg:self arg:i arguments arg arg If Compare If Compare Raise Call Return return:yes Call Assign Assign Call If Compare If BoolOp Compare Compare Raise Call Assign Call Return return:yes Call"
  },
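The private `_getrow` backs the public row-extraction API; from user code the same copy-of-row-i behavior is reachable via `getrow` (or slicing):

```python
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[1, 0, 2], [0, 0, 3], [4, 5, 6]]))
row = A.getrow(1)      # a (1 x 3) CSR row vector, returned as a copy
print(row.shape)       # (1, 3)
print(row.toarray())   # [[0 0 3]]
```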
  {
    "library": "pytorch",
    "name": "make_functional_deprecated_v1",
    "source_code": "def make_functional_deprecated_v1(model: nn.Module):\n    buffers = list(model.buffers())\n    if len(buffers) > 0:\n        raise RuntimeError('make_functional_deprecated_v1(model): `model` has buffers. Please use make_functional_with_buffers_deprecated_v1(model) instead.')\n    weights, descriptors, _ = extract_weights(model)\n\n    def fun(weights, data):\n        mutable_model = copy.deepcopy(model)\n        load_weights(mutable_model, descriptors, weights)\n        return mutable_model(*data)\n    return (weights, fun, descriptors)",
    "docstring": "make_functional_deprecated_v1(model) -> weights, func, weight_names Given an nn.Module, make_functional_deprecated_v1 extracts the state (weights) and returns a functional version of the model, . This makes it so that it is possible use transforms over the parameters of . can be invoked as follows: And here is an example of applying the grad transform: To put the state back into a model, use .",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\make_functional.py",
    "ast_data": "FunctionDef name:make_functional_deprecated_v1 arg:model arguments arg Assign Call Call If Compare Call Raise Call Assign Call FunctionDef name:fun arg:weights arg:data arguments arg arg Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "DeVilliersGlasser02",
    "source_code": "class DeVilliersGlasser02(Benchmark):\n\n    def __init__(self, dimensions=5):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([1.0] * self.N, [60.0] * self.N))\n        self.global_optimum = [[53.81, 1.27, 3.012, 2.13, 0.507]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        t = 0.1 * arange(16)\n        y = 53.81 * 1.27 ** t * tanh(3.012 * t + sin(2.13 * t)) * cos(exp(0.507) * t)\n        return sum((x[0] * x[1] ** t * tanh(x[2] * t + sin(x[3] * t)) * cos(t * exp(x[4])) - y) ** 2.0)",
    "docstring": "DeVilliers-Glasser 2 objective function. This class defines the DeVilliers-Glasser 2 [1]_ function global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DeVilliersGlasser01}}(x) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\tanh \\left [x_3t_i + \\sin(x_4t_i) \\right] \\cos(t_ie^{x_5}) - y_i \\right ]^2 Where, in this exercise, :math: and :math:. with :math: for :math:. *Global optimum*: :math: for :math:. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:DeVilliersGlasser02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Call Call Call Return return:yes Call Call Call Call Call"
  },
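The listed global optimum can be checked by recomputing the objective outside the benchmark class; the names below are local to this sketch, not scipy's.

```python
import numpy as np

t = 0.1 * np.arange(16)
y = 53.81 * 1.27**t * np.tanh(3.012 * t + np.sin(2.13 * t)) * np.cos(np.exp(0.507) * t)

def f(x):
    model = x[0] * x[1]**t * np.tanh(x[2] * t + np.sin(x[3] * t)) * np.cos(t * np.exp(x[4]))
    return np.sum((model - y) ** 2.0)

print(f([53.81, 1.27, 3.012, 2.13, 0.507]))  # 0.0: the residual vanishes at the optimum
```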
  {
    "library": "tensorflow",
    "name": "_parse_input_graph_proto",
    "source_code": "def _parse_input_graph_proto(input_graph: str, input_binary: bool) -> graph_pb2.GraphDef:\n    if not gfile.Exists(input_graph):\n        raise IOError(\"Input graph file '\" + input_graph + \"' does not exist!\")\n    input_graph_def = graph_pb2.GraphDef()\n    mode = 'rb' if input_binary else 'r'\n    with gfile.GFile(input_graph, mode) as f:\n        if input_binary:\n            input_graph_def.ParseFromString(f.read())\n        else:\n            text_format.Merge(f.read(), input_graph_def)\n    return input_graph_def",
    "docstring": "Parses input tensorflow graph into GraphDef proto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\freeze_graph.py",
    "ast_data": "FunctionDef name:_parse_input_graph_proto arg:input_graph arg:input_binary arguments arg arg If Call Raise Call Assign Call Assign With Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "angle_spectrum",
    "source_code": "@_api.make_keyword_only('3.10', 'Fs')\n@_preprocess_data(replace_names=['x'])\n@_docstring.interpd\ndef angle_spectrum(self, x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, **kwargs):\n    if Fc is None:\n        Fc = 0\n    spec, freqs = mlab.angle_spectrum(x=x, Fs=Fs, window=window, pad_to=pad_to, sides=sides)\n    freqs += Fc\n    lines = self.plot(freqs, spec, **kwargs)\n    self.set_xlabel('Frequency')\n    self.set_ylabel('Angle (radians)')\n    return (spec, freqs, lines[0])",
    "docstring": "Plot the angle spectrum. Compute the angle spectrum (wrapped phase spectrum) of *x*. Data is padded to a length of *pad_to* and the windowing function *window* is applied to the signal. Parameters ---------- x : 1-D array or sequence Array or sequence containing the data. %(Spectral)s %(Single_Spectrum)s Fc : int, default: 0 The center frequency of *x*, which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns ------- spectrum : 1-D array The values for the angle spectrum in radians (real valued). freqs : 1-D array The frequencies corresponding to the elements in *spectrum*. line : The line created by this function. Other Parameters ---------------- data : indexable object, optional DATA_PARAMETER_PLACEHOLDER **kwargs Keyword arguments control the properties: %(Line2D:kwdoc)s See Also -------- magnitude_spectrum Plots the magnitudes of the corresponding frequencies. phase_spectrum Plots the unwrapped version of this function. specgram Can plot the angle spectrum of segments within the signal in a colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:angle_spectrum arg:self arg:x arg:Fs arg:Fc arg:window arg:pad_to arg:sides arguments arg arg arg arg arg arg arg arg If Compare Assign Assign Call Assign Call Call Call Return return:yes Call Call"
  },
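`Axes.angle_spectrum` is public Matplotlib API, so a direct usage sketch is safe; the two-tone test signal is arbitrary.

```python
import numpy as np
import matplotlib.pyplot as plt

fs = 1000
t = np.arange(0, 1, 1 / fs)
x = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 120 * t)

fig, ax = plt.subplots()
spec, freqs, line = ax.angle_spectrum(x, Fs=fs)  # wrapped phase vs. frequency
print(spec.shape, freqs.shape)
plt.show()
```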
  {
    "library": "pytorch",
    "name": "is_release",
    "source_code": "def is_release(self) -> bool:\n    return self.build_type_string == 'Release'",
    "docstring": "Checks Release build.",
    "type": "method",
    "file_path": "pytorch\\tools\\setup_helpers\\env.py",
    "ast_data": "FunctionDef name:is_release arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "from_esri",
    "source_code": "def from_esri(self):\n    capi.morph_from_esri(self.ptr)",
    "docstring": "Morph this SpatialReference from ESRI's format to EPSG.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:from_esri arg:self arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "CurveBracket",
    "source_code": "@_register_style(_style_list, name='<-[')\nclass CurveBracket(_Curve):\n    arrow = '<-['\n\n    def __init__(self, widthB=1.0, lengthB=0.2, angleB=None):\n        super().__init__(widthB=widthB, lengthB=lengthB, angleB=angleB)",
    "docstring": "An arrow with an outward square bracket at its end and a head at the start.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:CurveBracket Assign FunctionDef name:__init__ arg:self arg:widthB arg:lengthB arg:angleB arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "Hook",
    "source_code": "class Hook(object):\n    callback = None\n    '\\n    The bare callable that this Hook object is wrapping, which will\\n    be called when the Hook is called.'\n    failsafe = False\n    '\\n    If True, the callback is guaranteed to run even if other callbacks\\n    from the same call point raise exceptions.'\n    priority = 50\n    'Defines the order of execution for a list of Hooks.\\n\\n    Priority numbers should be limited to the closed interval [0, 100],\\n    but values outside this range are acceptable, as are fractional\\n    values.\\n    '\n    kwargs = {}\n    '\\n    A set of keyword arguments that will be passed to the\\n    callable on each call.'\n\n    def __init__(self, callback, failsafe=None, priority=None, **kwargs):\n        self.callback = callback\n        if failsafe is None:\n            failsafe = getattr(callback, 'failsafe', False)\n        self.failsafe = failsafe\n        if priority is None:\n            priority = getattr(callback, 'priority', 50)\n        self.priority = priority\n        self.kwargs = kwargs\n\n    def __lt__(self, other):\n        return self.priority < other.priority\n\n    def __call__(self):\n        return self.callback(**self.kwargs)\n\n    def __repr__(self):\n        cls = self.__class__\n        return '%s.%s(callback=%r, failsafe=%r, priority=%r, %s)' % (cls.__module__, cls.__name__, self.callback, self.failsafe, self.priority, ', '.join(['%s=%r' % (k, v) for k, v in self.kwargs.items()]))",
    "docstring": "A callback and its metadata: failsafe, priority, and kwargs.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "ClassDef name:Hook Assign Assign Assign Assign FunctionDef name:__init__ arg:self arg:callback arg:failsafe arg:priority arguments arg arg arg arg arg Assign If Compare Assign Call Assign If Compare Assign Call Assign Assign FunctionDef name:__lt__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__call__ arg:self arguments arg Return return:yes Call FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_batch_rp_spec",
    "source_code": "def _batch_rp_spec(rp_spec: RowPartitionSpec, batch_size: Optional[int]) -> RowPartitionSpec:\n    if batch_size is None:\n        return RowPartitionSpec(uniform_row_length=rp_spec.uniform_row_length, dtype=rp_spec.dtype)\n    nrows = None if rp_spec.nrows is None else rp_spec.nrows * batch_size\n    nvals = None if rp_spec.nvals is None else rp_spec.nvals * batch_size\n    return RowPartitionSpec(nrows=nrows, nvals=nvals, uniform_row_length=rp_spec.uniform_row_length, dtype=rp_spec.dtype)",
    "docstring": "Batches a RowPartitionSpec. Given a RowPartitionSpec and a batch_size, create a RowPartitionSpec that will be the spec for the concatenation of batch_size RowPartitions. A RowPartition can be considered a transformation from a list of a given length to a list of lists. Assume rp_a is a map from list_a to nlist_a, And rp_b is a map from list_b to nlist_b. concat(rp_a, rp_b) is a transform of concat(list_a, list_b) to concat(nlist_a, nlist_b). If batch_size is None, then have the spec be able to handle an arbitrary number of RowPartitions. Args: rp_spec: a RowPartitionSpec for all the RowPartitions to be concatenated. batch_size: the number of rp_specs to be concatenated. Returns: a batched RowPartitionSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_batch_rp_spec arg:rp_spec arg:batch_size arguments arg arg If Compare Return return:yes Call Assign Compare Assign Compare Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_builtin_code_from_operator_code",
    "source_code": "def get_builtin_code_from_operator_code(opcode):\n    if hasattr(opcode, 'BuiltinCode') and callable(opcode.BuiltinCode):\n        return max(opcode.BuiltinCode(), opcode.DeprecatedBuiltinCode())\n    return max(opcode.builtinCode, opcode.deprecatedBuiltinCode)",
    "docstring": "Return the builtin code of the given operator code. The following method is introduced to resolve op builtin code shortage problem. The new builtin operator will be assigned to the extended builtin code field in the flatbuffer schema. Those methods helps to hide builtin code details. Args: opcode: Operator code. Returns: The builtin code of the given operator code.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\schema_util.py",
    "ast_data": "FunctionDef name:get_builtin_code_from_operator_code arg:opcode arguments arg If BoolOp Call Call Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "format_coord",
    "source_code": "def format_coord(self, x, y):\n    twins = self._twinned_axes.get_siblings(self)\n    if len(twins) == 1:\n        return '(x, y) = ({}, {})'.format('???' if x is None else self.format_xdata(x), '???' if y is None else self.format_ydata(y))\n    screen_xy = self.transData.transform((x, y))\n    xy_strs = []\n    for ax in sorted(twins, key=attrgetter('zorder')):\n        data_x, data_y = ax.transData.inverted().transform(screen_xy)\n        xy_strs.append('({}, {})'.format(ax.format_xdata(data_x), ax.format_ydata(data_y)))\n    return '(x, y) = {}'.format(' | '.join(xy_strs))",
    "docstring": "Return a format string formatting the *x*, *y* coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:format_coord arg:self arg:x arg:y arguments arg arg arg Assign Call If Compare Call Return return:yes Call Compare Call Compare Call Assign Call Assign For Call Call Assign Call Call Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "authlib",
    "name": "UnapprovedSoftwareStatementError",
    "source_code": "class UnapprovedSoftwareStatementError(OAuth2Error):\n    error = 'unapproved_software_statement'",
    "docstring": "The software statement presented is not approved for use by this authorization server.",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\errors.py",
    "ast_data": "ClassDef name:UnapprovedSoftwareStatementError Assign"
  },
  {
    "library": "tensorflow",
    "name": "copy_metadata",
    "source_code": "def copy_metadata(self):\n    return self.__class__(None, filename=self.filename, lineno=self.lineno)",
    "docstring": "Return a TraceableObject like this one, but without the object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:copy_metadata arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "@torch.no_grad()\ndef step(self, closure=None):\n    self._cuda_graph_capture_health_check()\n    loss = None\n    if closure is not None:\n        with torch.enable_grad():\n            loss = closure()\n    for group in self.param_groups:\n        params_with_grad: list[Tensor] = []\n        grads: list[Tensor] = []\n        row_vars: list[Optional[Tensor]] = []\n        col_vars: list[Optional[Tensor]] = []\n        variances: list[Optional[Tensor]] = []\n        state_steps: list[Tensor] = []\n        eps1, eps2 = group['eps']\n        has_complex = self._init_group(group, params_with_grad, grads, row_vars, col_vars, variances, state_steps)\n        adafactor(params_with_grad, grads, row_vars, col_vars, variances, state_steps, d=group['d'], lr=group['lr'], beta2_decay=group['beta2_decay'], weight_decay=group['weight_decay'], eps1=eps1, eps2=eps2, foreach=group['foreach'], maximize=group['maximize'], grad_scale=getattr(self, 'grad_scale', None), found_inf=getattr(self, 'found_inf', None), has_complex=has_complex)\n    return loss",
    "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\_adafactor.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Call Assign If Compare With Call Assign Call For Assign Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "selu",
    "source_code": "def selu(input: Tensor, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(selu, (input,), input, inplace=inplace)\n    if inplace:\n        result = torch.selu_(input)\n    else:\n        result = torch.selu(input)\n    return result",
    "docstring": "selu(input, inplace=False) -> Tensor Applies element-wise, :math:, with :math: and :math:. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:selu arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_svd_flip_1d",
    "source_code": "def _svd_flip_1d(u, v):\n    biggest_abs_val_idx = np.argmax(np.abs(u))\n    sign = np.sign(u[biggest_abs_val_idx])\n    u *= sign\n    v *= sign",
    "docstring": "Same as svd_flip but works on 1d arrays, and is inplace",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:_svd_flip_1d arg:u arg:v arguments arg arg Assign Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_find_proxy",
    "source_code": "def _find_proxy(*objects_to_search):\n    proxy = None\n\n    def find_proxy(x):\n        nonlocal proxy\n        if isinstance(x, Proxy):\n            proxy = x\n    map_aggregate(objects_to_search, find_proxy)\n    return proxy",
    "docstring": "Recursively search a data structure for a Proxy() and return it, return None if not found.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:_find_proxy arguments arg Assign FunctionDef name:find_proxy arg:x arguments arg If Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "RandomChannelShuffle",
    "source_code": "class RandomChannelShuffle(IntensityAugmentationBase2D):\n\n    def __init__(self, same_on_batch: bool=False, p: float=0.5, keepdim: bool=False) -> None:\n        super().__init__(p=p, same_on_batch=same_on_batch, p_batch=1.0, keepdim=keepdim)\n\n    def generate_parameters(self, shape: Tuple[int, ...]) -> Dict[str, Tensor]:\n        B, C, _, _ = shape\n        channels = torch.rand(B, C).argsort(dim=1)\n        return {'channels': channels}\n\n    def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n        out = torch.empty_like(input)\n        for i in range(out.shape[0]):\n            out[i] = input[i, params['channels'][i]]\n        return out",
    "docstring": "Shuffle the channels of a batch of multi-dimensional images. .. image:: _static/img/RandomChannelShuffle.png Args: same_on_batch: apply the same transformation across the batch. p: probability of applying the transformation. keepdim: whether to keep the output shape the same as input ``. Examples: >>> rng = torch.manual_seed(0) >>> img = torch.arange(1*2*2*2.).view(1,2,2,2) >>> RandomChannelShuffle()(img) tensor([[[[4., 5.], [6., 7.]], [[0., 1.], [2., 3.]]]]) To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> aug = RandomChannelShuffle(p=1.) >>> (aug(input) == aug(input, params=aug._params)).all() tensor(True)",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\channel_shuffle.py",
    "ast_data": "ClassDef name:RandomChannelShuffle FunctionDef name:__init__ arg:self arg:same_on_batch arg:p arg:keepdim arguments arg arg arg arg Call Call FunctionDef name:generate_parameters arg:self arg:shape arguments arg arg Assign Assign Call Call Return return:yes FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Assign Call For Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_default_bbox_extra_artists",
    "source_code": "def get_default_bbox_extra_artists(self):\n    bbox_artists = [artist for artist in self.get_children() if artist.get_visible() and artist.get_in_layout()]\n    for ax in self.axes:\n        if ax.get_visible():\n            bbox_artists.extend(ax.get_default_bbox_extra_artists())\n    return bbox_artists",
    "docstring": "Return a list of Artists typically used in .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_default_bbox_extra_artists arg:self arguments arg Assign Call BoolOp Call Call For If Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "transform_points",
    "source_code": "def transform_points(self, points_in_src: Tensor) -> Tensor:\n    return self._dst_from_src * points_in_src",
    "docstring": "Transform points from source frame to destination frame. Args: points_in_src: Points in source frame. Returns: Points in destination frame. Example: >>> b_from_a = NamedPose(Se3.identity(), frame_src=\"frame_a\", frame_dst=\"frame_b\") >>> b_from_a.transform_points(torch.tensor([1., 2., 3.])) tensor([1., 2., 3.], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\pose.py",
    "ast_data": "FunctionDef name:transform_points arg:self arg:points_in_src arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "grid2mask",
    "source_code": "def grid2mask(self, xi, yi):\n    return (round(xi * self.x_grid2mask), round(yi * self.y_grid2mask))",
    "docstring": "Return nearest space in mask-coords from given grid-coords.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py",
    "ast_data": "FunctionDef name:grid2mask arg:self arg:xi arg:yi arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "masked_values",
    "source_code": "def masked_values(x, value, rtol=1e-05, atol=1e-08, copy=True, shrink=True):\n    xnew = filled(x, value)\n    if np.issubdtype(xnew.dtype, np.floating):\n        mask = np.isclose(xnew, value, atol=atol, rtol=rtol)\n    else:\n        mask = umath.equal(xnew, value)\n    ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value)\n    if shrink:\n        ret.shrink_mask()\n    return ret",
    "docstring": "Mask using floating point equality. Return a MaskedArray, masked where the data in array are approximately equal to , determined using . The default tolerances for are the same as those for . For integer types, exact equality is used, in the same way as . The fill_value is set to and the mask is set to `isclosexxvaluemaskmasked_equalmasked_values` can perform approximate equalities. >>> ma.masked_values(x, 2.1, atol=1e-1) masked_array(data=[1.0, 1.1, --, 1.1, 3.0], mask=[False, False, True, False, False], fill_value=2.1)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_values arg:x arg:value arg:rtol arg:atol arg:copy arg:shrink arguments arg arg arg arg arg arg Assign Call If Call Assign Call Assign Call Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self) -> dict[str, Any]:\n    data_groups = self._get_serializable_data_groups()\n    state = self._convert_mask(self.state)\n    return {'state': state, 'data_groups': data_groups, 'defaults': self.defaults}",
    "docstring": "Returns the state of the sparsifier as a :class:. It contains: * state - contains name -> mask mapping. * data_groups - a dictionary containing all config information for each layer * defaults - the default config while creating the constructor",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "window_unpartition",
    "source_code": "def window_unpartition(windows: Tensor, window_size: int, pad_hw: tuple[int, int], hw: tuple[int, int]) -> Tensor:\n    Hp, Wp = pad_hw\n    H, W = hw\n    B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n    x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n    if Hp > H or Wp > W:\n        x = x[:, :H, :W, :].contiguous()\n    return x",
    "docstring": "Window unpartition into original sequences and removing padding. Args: windows: input tokens with [B * num_windows, window_size, window_size, C]. window_size: window size. pad_hw: padded height and width (Hp, Wp). hw: original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C].",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\common.py",
    "ast_data": "FunctionDef name:window_unpartition arg:windows arg:window_size arg:pad_hw arg:hw arguments arg arg arg arg Assign Assign Assign Assign Call Assign Call Call Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_ppf",
    "source_code": "def _ppf(self, q):\n    if self._p_domain == 1.0:\n        return self._frozendist.ppf(q)\n    x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower)\n    return np.clip(x, self._domain_adj[0], self._domain_adj[1])",
    "docstring": "Percent point function (inverse of ) Parameters ---------- q : array_like lower tail probability Returns ------- x : array_like quantile corresponding to the lower tail probability q.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_sampling.py",
    "ast_data": "FunctionDef name:_ppf arg:self arg:q arguments arg arg If Compare Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "optgroups",
    "source_code": "def optgroups(self, name, value, attrs=None):\n    groups = []\n    has_selected = False\n    for index, (option_value, option_label) in enumerate(self.choices):\n        if option_value is None:\n            option_value = ''\n        subgroup = []\n        if isinstance(option_label, (list, tuple)):\n            group_name = option_value\n            subindex = 0\n            choices = option_label\n        else:\n            group_name = None\n            subindex = None\n            choices = [(option_value, option_label)]\n        groups.append((group_name, subgroup, index))\n        for subvalue, sublabel in choices:\n            selected = (not has_selected or self.allow_multiple_selected) and str(subvalue) in value\n            has_selected |= selected\n            subgroup.append(self.create_option(name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs))\n            if subindex is not None:\n                subindex += 1\n    return groups",
    "docstring": "Return a list of optgroups for this widget.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:optgroups arg:self arg:name arg:value arg:attrs arguments arg arg arg arg Assign Assign For Call If Compare Assign Assign If Call Assign Assign Assign Assign Assign Assign Call For Assign BoolOp BoolOp Compare Call Call Call If Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_comparison_values",
    "source_code": "def _generate_comparison_values(self, input_info: dict, weight_info: dict) -> dict[str, torch.Tensor]:\n    module_fqn_to_channel: dict[str, torch.Tensor] = {}\n    for module_fqn in input_info:\n        if module_fqn not in weight_info:\n            raise KeyError(f'Unable to find weight range stats for module {module_fqn}')\n        weight_ratio = self._calculate_range_ratio(weight_info[module_fqn], self.WEIGHT_STR, module_fqn)\n        input_ratio = self._calculate_range_ratio(input_info[module_fqn], self.INPUT_STR, module_fqn)\n        weight_channels = len(weight_ratio)\n        input_channels = len(input_ratio)\n        if weight_channels != input_channels:\n            assert input_channels % weight_channels == 0, 'input channels should be divisible by weight channels.'\n            rep_factor: int = input_channels // weight_channels\n            weight_ratio = weight_ratio.repeat(rep_factor)\n        s = torch.sqrt(weight_ratio) / torch.sqrt(input_ratio)\n        module_fqn_to_channel[module_fqn] = s\n    return module_fqn_to_channel",
    "docstring": "Takes in the information on the min and max values of the inputs and weights and: Calculates the comp stat for each channel: s_c = sqrt(w_c/W)/sqrt(i_c/I) Args: input_info (dict): A dict mapping each observer to input range information weight_info (dict): A dict mapping each observer to weight range information Returns a dict mapping relevant observer fqns (str) to a 1-D tensor. Each value is a different s_c value for a different channel",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_generate_comparison_values arg:self arg:input_info arg:weight_info arguments arg arg arg For If Compare Raise Call Assign Call Assign Call Assign Call Assign Call If Compare Compare Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_partition_param_group",
    "source_code": "def _partition_param_group(self, param_group: dict[str, Any], params_per_rank: list[list[torch.Tensor]]) -> None:\n    for rank, params in enumerate(params_per_rank):\n        rank_param_group = copy.copy(param_group)\n        rank_param_group['params'] = params\n        self._partition_parameters_cache[rank].append(rank_param_group)",
    "docstring": "Partition the parameter group `_partition_parameterslistlist` s of parameters to assign to each rank.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_partition_param_group arg:self arg:param_group arg:params_per_rank arguments arg arg arg For Call Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "twinx",
    "source_code": "def twinx(ax: matplotlib.axes.Axes | None=None) -> _AxesBase:\n    if ax is None:\n        ax = gca()\n    ax1 = ax.twinx()\n    return ax1",
    "docstring": "Make and return a second Axes that shares the *x*-axis. The new Axes will overlay *ax* (or the current Axes if *ax* is *None*), and its ticks will be on the right. Examples -------- :doc:",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:twinx arg:ax arguments arg If Compare Assign Call Assign Call Return return:yes"
  },
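A short usage sketch for the pyplot wrapper above, plotting two series against a shared x-axis:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [10, 20, 30], color="tab:blue")
ax.set_ylabel("left scale")

ax2 = plt.twinx(ax)  # new Axes sharing ax's x-axis, ticks on the right
ax2.plot([0, 1, 2], [0.1, 0.4, 0.9], color="tab:red")
ax2.set_ylabel("right scale")
plt.show()
```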
  {
    "library": "tensorflow",
    "name": "register_type_spec_from_value_converter",
    "source_code": "def register_type_spec_from_value_converter(type_object, converter_fn, allow_subclass=False):\n    _, type_object = tf_decorator.unwrap(type_object)\n    _TYPE_CONVERSION_FUNCTION_REGISTRY.append((type_object, converter_fn, allow_subclass))",
    "docstring": "Registers a function for converting values with a given type to TypeSpecs. If multiple registered s match a value, then the most recent registration takes precedence. Custom converters should not be defined for s; use instead. Args: type_object: A Python object representing the type of values accepted by . converter_fn: A function that takes one argument (an instance of the type represented by ) and returns a . allow_subclass: If true, then use to check for matches. If false, then use .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:register_type_spec_from_value_converter arg:type_object arg:converter_fn arg:allow_subclass arguments arg arg arg Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_scientific",
    "source_code": "def set_scientific(self, b):\n    self._scientific = bool(b)",
    "docstring": "Turn scientific notation on or off. See Also -------- ScalarFormatter.set_powerlimits",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_scientific arg:self arg:b arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_ragged_embedding_lookup_with_reduce",
    "source_code": "def _ragged_embedding_lookup_with_reduce(table: tf_variables.Variable, ragged: ragged_tensor.RaggedTensor, weights: ragged_tensor.RaggedTensor, combiner: str) -> core.Tensor:\n    if weights is None:\n        weights = array_ops.ones_like(ragged, dtype=table.dtype)\n    weights = array_ops.expand_dims(weights, axis=2)\n    ragged_result = embedding_ops.embedding_lookup(table, ragged)\n    ragged_result = math_ops.reduce_sum(ragged_result * weights, axis=1)\n    if combiner == 'mean':\n        ragged_result = math_ops.div_no_nan(ragged_result, math_ops.reduce_sum(weights, axis=1))\n    elif combiner == 'sqrtn':\n        ragged_result = math_ops.div_no_nan(ragged_result, math_ops.sqrt(math_ops.reduce_sum(weights * weights, axis=1)))\n    return ragged_result",
    "docstring": "Compute a ragged lookup followed by a reduce on axis 1. Args: table: The embedding table. ragged: A RaggedTensor of ids to look up. weights: A RaggedTensor of weights (or None). combiner: One of \"mean\", \"sum\", \"sqrtn\". Returns: A Tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py",
    "ast_data": "FunctionDef name:_ragged_embedding_lookup_with_reduce arg:table arg:ragged arg:weights arg:combiner arguments arg arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call If Compare Assign Call Call If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "add_tensor_filter",
    "source_code": "def add_tensor_filter(self, filter_name, tensor_filter):\n    self._tensor_filters[filter_name] = tensor_filter",
    "docstring": "Add a tensor filter. Args: filter_name: () name of the filter. tensor_filter: () the filter callable. See the doc string of for more details about its signature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py",
    "ast_data": "FunctionDef name:add_tensor_filter arg:self arg:filter_name arg:tensor_filter arguments arg arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "use_count",
    "source_code": "def use_count(self) -> int:\n    return super().use_count()",
    "docstring": "Returns the reference count of this pool.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:use_count arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_reciprocal_flops",
    "source_code": "@ops.RegisterStatistics('Reciprocal', 'flops')\ndef _reciprocal_flops(graph, node):\n    return _unary_op_flops(graph, node)",
    "docstring": "Compute flops for Reciprocal operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_reciprocal_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast_prefix",
    "source_code": "def broadcast_prefix(prefix_tree: PyTree, full_tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> list[Any]:\n    result: list[Any] = []\n\n    def add_leaves(x: Any, subtree: PyTree) -> None:\n        subtreespec = tree_structure(subtree, is_leaf=is_leaf)\n        result.extend([x] * subtreespec.num_leaves)\n    tree_map_(add_leaves, prefix_tree, full_tree, is_leaf=is_leaf)\n    return result",
    "docstring": "Return a list of broadcasted leaves in `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:broadcast_prefix arg:prefix_tree arg:full_tree arg:is_leaf arguments arg arg arg FunctionDef name:add_leaves arg:x arg:subtree arguments arg arg Assign Call Call Call Return return:yes"
  },
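A minimal sketch of the broadcasting behavior above; `torch.utils._cxx_pytree` is a private module and requires the optree package:

```python
from torch.utils._cxx_pytree import broadcast_prefix  # private; needs optree installed

prefix = [1, 2]        # one leaf per top-level entry of full_tree
full = [0, [0, 0]]     # the second entry expands into two leaves
print(broadcast_prefix(prefix, full))  # [1, 2, 2] -- leaf 2 is repeated per subtree leaf
```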
  {
    "library": "pytorch",
    "name": "StatsForKernelType",
    "source_code": "@dataclasses.dataclass\nclass StatsForKernelType:\n    reads: StatsForReadsOrWrites\n    writes: StatsForReadsOrWrites\n    memory: StatsForReadsOrWrites\n\n    @classmethod\n    def compute(cls, loops: list[MemoryEstimate], estimator: MemoryEstimator) -> typing.Self:\n        reads = StatsForReadsOrWrites.compute([loop.reads for loop in loops], estimator.symbols)\n        writes = StatsForReadsOrWrites.compute([loop.writes for loop in loops], estimator.symbols)\n        return cls(reads=reads, writes=writes, memory=reads + writes)",
    "docstring": "Memory usage stats that are collected for both persistent and looped kernels",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "ClassDef name:StatsForKernelType FunctionDef name:compute arg:cls arg:loops arg:estimator arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "Text",
    "source_code": "class Text(namedtuple('Text', 'x y font glyph width')):\n\n    def _get_pdftexmap_entry(self):\n        return PsfontsMap(find_tex_file('pdftex.map'))[self.font.texname]\n\n    @property\n    def font_path(self):\n        psfont = self._get_pdftexmap_entry()\n        if psfont.filename is None:\n            raise ValueError('No usable font file found for {} ({}); the font may lack a Type-1 version'.format(psfont.psname.decode('ascii'), psfont.texname.decode('ascii')))\n        return Path(psfont.filename)\n\n    @property\n    def font_size(self):\n        return self.font.size\n\n    @property\n    def font_effects(self):\n        return self._get_pdftexmap_entry().effects\n\n    @property\n    def index(self):\n        return self.font._index_dvi_to_freetype(self.glyph)\n\n    @property\n    def glyph_name_or_index(self):\n        entry = self._get_pdftexmap_entry()\n        return _parse_enc(entry.encoding)[self.glyph] if entry.encoding is not None else self.glyph",
    "docstring": "A glyph in the dvi file. The *x* and *y* attributes directly position the glyph. The *font*, *glyph*, and *width* attributes are kept public for back-compatibility, but users wanting to draw the glyph themselves are encouraged to instead load the font specified by at , warp it with the effects specified by , and load the glyph at the FreeType glyph .",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "ClassDef name:Text Call FunctionDef name:_get_pdftexmap_entry arg:self arguments arg Return return:yes Call Call FunctionDef name:font_path arg:self arguments arg Assign Call If Compare Raise Call Call Call Call Return return:yes Call FunctionDef name:font_size arg:self arguments arg Return return:yes FunctionDef name:font_effects arg:self arguments arg Return return:yes Call FunctionDef name:index arg:self arguments arg Return return:yes Call FunctionDef name:glyph_name_or_index arg:self arguments arg Assign Call Return return:yes Compare Call"
  },
  {
    "library": "pandas",
    "name": "memory_usage",
    "source_code": "def memory_usage(self, deep: bool=False) -> int:\n    return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep)",
    "docstring": "Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:memory_usage arg:self arg:deep arguments arg arg Return return:yes Call"
  },
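For illustration, calling the method above directly on a Categorical; the shallow count covers the codes plus the categories index, while deep=True also introspects object-dtype contents:

```python
import pandas as pd

cat = pd.Categorical(["a", "b", "a", "c", "a"])
print(cat.memory_usage())           # shallow: codes + categories index
print(cat.memory_usage(deep=True))  # also counts the category string objects
```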
  {
    "library": "pytorch",
    "name": "_replicate_tensor",
    "source_code": "def _replicate_tensor(self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int, src_data_rank: Optional[int]=0) -> torch.Tensor:\n    my_coordinate = mesh.get_coordinate()\n    if my_coordinate is None:\n        return tensor.new_empty(0, requires_grad=tensor.requires_grad)\n    tensor = tensor.contiguous()\n    if src_data_rank is not None:\n        mesh_broadcast(tensor, mesh, mesh_dim=mesh_dim, group_src=src_data_rank)\n    return tensor",
    "docstring": "Replicate (broadcast) a torch.Tensor on a mesh dimension (use the first coordinate on the mesh dimension as source of truth)",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:_replicate_tensor arg:self arg:tensor arg:mesh arg:mesh_dim arg:src_data_rank arguments arg arg arg arg arg Assign Call If Compare Return return:yes Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_standardize_out_kwarg",
    "source_code": "def _standardize_out_kwarg(**kwargs) -> dict:\n    if 'out' not in kwargs and 'out1' in kwargs and ('out2' in kwargs):\n        out1 = kwargs.pop('out1')\n        out2 = kwargs.pop('out2')\n        out = (out1, out2)\n        kwargs['out'] = out\n    return kwargs",
    "docstring": "If kwargs contain \"out1\" and \"out2\", replace that with a tuple \"out\" np.divmod, np.modf, np.frexp can have either or",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\arraylike.py",
    "ast_data": "FunctionDef name:_standardize_out_kwarg arguments arg If BoolOp Compare Compare Compare Assign Call Assign Call Assign Assign Return return:yes"
  },
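A quick sketch of the repacking above; the helper is private, so the import path (taken from the listed file_path) is an internal detail:

```python
import numpy as np
from pandas.core.arraylike import _standardize_out_kwarg  # private helper

out1, out2 = np.empty(3), np.empty(3)
kwargs = _standardize_out_kwarg(out1=out1, out2=out2)
print(list(kwargs))              # ['out']
print(kwargs["out"][0] is out1)  # True -- out1/out2 repacked as a tuple under "out"
```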
  {
    "library": "tensorflow",
    "name": "op_is_inside_loop",
    "source_code": "def op_is_inside_loop(self, op: ops.Operation) -> bool:\n    assert isinstance(op, ops.Operation)\n    return op._id in self._pfor_op_ids",
    "docstring": "True if op was created inside the pfor loop body.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:op_is_inside_loop arg:self arg:op arguments arg arg Call Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "rank",
    "source_code": "@property\ndef rank(self):\n    inner_rank = self.inner_rank\n    if inner_rank is None:\n        return None\n    else:\n        return self.num_row_partitions + inner_rank",
    "docstring": "The number of dimensions in this shape, or None if unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:rank arg:self arguments arg Assign If Compare Return return:no Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "filter",
    "source_code": "def filter(self, predicate, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import filter_op\n    return filter_op._filter(self, predicate, name)",
    "docstring": "Filters this dataset according to . >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> dataset = dataset.filter(lambda x: x >> [a.item() for a in dataset.as_numpy_iterator()] [1, 2] >>> # is required for equality comparison >>> def filter_fn(x): ... return tf.math.equal(x, 1) >>> dataset = dataset.filter(filter_fn) >>> [a.item() for a in dataset.as_numpy_iterator()] [1] Args: predicate: A function mapping a dataset element to a boolean. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:filter arg:self arg:predicate arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "postprocess_messages",
    "source_code": "def postprocess_messages(self, msgs):\n    if not self.is_templatized:\n        return msgs\n    if os.name == 'nt':\n        old_path = self.work_path\n        new_path = self.path\n    else:\n        old_path = self.work_path[2:]\n        new_path = self.path[2:]\n    return re.sub('^(#: .*)(' + re.escape(old_path) + ')', lambda match: match[0].replace(old_path, new_path), msgs, flags=re.MULTILINE)",
    "docstring": "Postprocess messages generated by xgettext GNU gettext utility. Transform paths as if these messages were generated from original translatable files rather than from preprocessed versions.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:postprocess_messages arg:self arg:msgs arguments arg arg If Return return:yes If Compare Assign Assign Assign Assign Return return:yes Call Call arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_calculate_population_feasibilities",
    "source_code": "def _calculate_population_feasibilities(self, population):\n    num_members = np.size(population, 0)\n    if not self._wrapped_constraints:\n        return (np.ones(num_members, bool), np.zeros((num_members, 1)))\n    parameters_pop = self._scale_parameters(population)\n    if self.vectorized:\n        constraint_violation = np.array(self._constraint_violation_fn(parameters_pop))\n    else:\n        constraint_violation = np.array([self._constraint_violation_fn(x) for x in parameters_pop])\n        constraint_violation = constraint_violation[:, 0]\n    feasible = ~(np.sum(constraint_violation, axis=1) > 0)\n    return (feasible, constraint_violation)",
    "docstring": "Calculate the feasibilities of a population. Parameters ---------- population : ndarray An array of parameter vectors normalised to [0, 1] using lower and upper limits. Has shape ``, where M is the number of constraints.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_calculate_population_feasibilities arg:self arg:population arguments arg arg Assign Call If Return return:yes Call Call Assign Call If Assign Call Call Assign Call Call Assign Assign Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "IOHandles",
    "source_code": "@dataclasses.dataclass\nclass IOHandles(Generic[AnyStr]):\n    handle: IO[AnyStr]\n    compression: CompressionDict\n    created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)\n    is_wrapped: bool = False\n\n    def close(self) -> None:\n        if self.is_wrapped:\n            assert isinstance(self.handle, TextIOWrapper)\n            self.handle.flush()\n            self.handle.detach()\n            self.created_handles.remove(self.handle)\n        for handle in self.created_handles:\n            handle.close()\n        self.created_handles = []\n        self.is_wrapped = False\n\n    def __enter__(self) -> IOHandles[AnyStr]:\n        return self\n\n    def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None:\n        self.close()",
    "docstring": "Return value of io/common.py:get_handle Can be used as a context manager. This is used to easily close created buffers and to handle corner cases when TextIOWrapper is inserted. handle: The file handle to be used. created_handles: All file handles that are created by get_handle is_wrapped: Whether a TextIOWrapper needs to be detached.",
    "type": "class",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "ClassDef name:IOHandles Call FunctionDef name:close arg:self arguments arg If Call Call Call Call For Call Assign Assign FunctionDef name:__enter__ arg:self arguments arg Return return:yes FunctionDef name:__exit__ arg:self arg:exc_type arg:exc_value arg:traceback arguments arg arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_constrained_layout",
    "source_code": "def get_constrained_layout(self):\n    return isinstance(self.get_layout_engine(), ConstrainedLayoutEngine)",
    "docstring": "Return whether constrained layout is being used. See :ref:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_constrained_layout arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "safe_call",
    "source_code": "def safe_call(func: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs) -> _R:\n    if _IS_WINDOWS:\n        warnings.warn(f'A new process is not created for {func} on Windows.', stacklevel=1)\n        return func(*args, **kwargs)\n    with multiprocessing.get_context('fork').Pool(1) as pool:\n        result = pool.apply_async(_call_function_and_return_exception, (func, args, kwargs))\n        result = result.get(timeout=5)\n    if isinstance(result, Exception):\n        raise result\n    return result",
    "docstring": "Call a function in a separate process. Args: func: The function to call. args: The positional arguments to pass to the function. kwargs: The keyword arguments to pass to the function. Returns: The return value of the function. Raises: Exception: If the function raised an exception.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_isolated.py",
    "ast_data": "FunctionDef name:safe_call arg:func arguments arg arg arg If Call Return return:yes Call With Call Call Assign Call Assign Call If Call Raise Return return:yes"
  },
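A minimal sketch of calling a function through the isolation wrapper above; the module is private (path taken from the listed file_path), and on POSIX the call runs in a forked child process:

```python
from torch.onnx._internal.exporter._isolated import safe_call  # private module

def risky(a: int, b: int) -> int:
    return a + b

# Runs in a forked child on POSIX; on Windows it warns and calls in-process.
print(safe_call(risky, 1, 2))  # 3
```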
  {
    "library": "pytorch",
    "name": "mark",
    "source_code": "def mark(msg):\n    return _nvtx.markA(msg)",
    "docstring": "Describe an instantaneous event that occurred at some point. Args: msg (str): ASCII message to associate with the event.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\nvtx.py",
    "ast_data": "FunctionDef name:mark arg:msg arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "generic",
    "source_code": "def generic(name, tensor, metadata=None, family=None, step=None):\n\n    def function(tag, scope):\n        if metadata is None:\n            serialized_metadata = constant_op.constant('')\n        elif hasattr(metadata, 'SerializeToString'):\n            serialized_metadata = constant_op.constant(metadata.SerializeToString())\n        else:\n            serialized_metadata = metadata\n        return gen_summary_ops.write_summary(_summary_state.writer._resource, _choose_step(step), array_ops.identity(tensor), tag, serialized_metadata, name=scope)\n    return summary_writer_function(name, tensor, function, family=family)",
    "docstring": "Writes a tensor summary if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:generic arg:name arg:tensor arg:metadata arg:family arg:step arguments arg arg arg arg arg FunctionDef name:function arg:tag arg:scope arguments arg arg If Compare Assign Call If Call Assign Call Call Assign Return return:yes Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_should_fallback_to_positional",
    "source_code": "@cache_readonly\ndef _should_fallback_to_positional(self) -> bool:\n    return self.levels[0]._should_fallback_to_positional",
    "docstring": "Should integer key(s) be treated as positional?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_should_fallback_to_positional arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_test_settings_get",
    "source_code": "def _test_settings_get(self, key, default=None, prefixed=None):\n    settings_dict = self.connection.settings_dict\n    val = settings_dict['TEST'].get(key, default)\n    if val is None and prefixed:\n        val = TEST_DATABASE_PREFIX + settings_dict[prefixed]\n    return val",
    "docstring": "Return a value from the test settings dict, or a given default, or a prefixed entry from the main settings dict.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\oracle\\creation.py",
    "ast_data": "FunctionDef name:_test_settings_get arg:self arg:key arg:default arg:prefixed arguments arg arg arg arg Assign Assign Call If BoolOp Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "guard_size_oblivious",
    "source_code": "def guard_size_oblivious(expr: Union[torch.SymBool, bool]) -> bool:\n    if isinstance(expr, torch.SymBool):\n        return expr.node.guard_size_oblivious('', 0)\n    else:\n        assert isinstance(expr, bool), expr\n        return expr",
    "docstring": "Perform a guard on a symbolic boolean expression in a size oblivious way. This is typically used when a non-oblivious test would result in a guard on a data dependent value of which we don't know the value of at compile time. When a guard is tested this way, we may diverge in behavior from how regular PyTorch semantics would treat it. For more information, see",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:guard_size_oblivious arg:expr arguments arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variable_accessed",
    "source_code": "def variable_accessed(variable):\n    variables = _variables_override(variable)\n    for var in variables:\n        pywrap_tfe.TFE_Py_TapeVariableAccessed(var)\n        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)",
    "docstring": "Notifies all tapes in the stack that a variable has been accessed. Args: variable: variable to be watched.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:variable_accessed arg:variable arguments arg Assign Call For Call Call"
  },
  {
    "library": "kornia",
    "name": "set_rng_device_and_dtype",
    "source_code": "def set_rng_device_and_dtype(self, device: torch.device, dtype: torch.dtype) -> None:\n    self.device = device\n    self.dtype = dtype\n    if self._param_generator is not None:\n        self._param_generator.set_rng_device_and_dtype(device, dtype)",
    "docstring": "Change the random generation device and dtype. Note: The generated random numbers are not reproducible across different devices and dtypes.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:set_rng_device_and_dtype arg:self arg:device arg:dtype arguments arg arg arg Assign Assign If Compare Call"
  },
  {
    "library": "pandas",
    "name": "conform",
    "source_code": "def conform(self, rhs):\n    if not is_list_like(rhs):\n        rhs = [rhs]\n    if isinstance(rhs, np.ndarray):\n        rhs = rhs.ravel()\n    return rhs",
    "docstring": "inplace conform rhs",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:conform arg:self arg:rhs arguments arg arg If Call Assign If Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "del_tracking",
    "source_code": "def del_tracking(self):\n    for node in self.loaded_nodes.values():\n        node = node[0]\n        if not isinstance(node, base_layer.Layer):\n            continue\n        for name in PUBLIC_ATTRIBUTES:\n            node._delete_tracking(name)\n        if isinstance(node, functional_lib.Functional):\n            dependencies = list(node._self_unconditional_dependency_names)\n            for name in dependencies:\n                if re.match('^layer(_with_weights)?-[\\\\d+]', name) is not None:\n                    node._delete_tracking(name)",
    "docstring": "Removes tracked references that are only used when loading the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:del_tracking arg:self arguments arg For Call Assign If Call For Call If Call Assign Call For If Compare Call Call"
  },
  {
    "library": "django",
    "name": "Form",
    "source_code": "class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass):\n    pass",
    "docstring": "A collection of Fields, plus their associated data.",
    "type": "class",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "ClassDef name:Form"
  },
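For illustration, declaring a concrete Form; the metaclass collects the declared fields into `base_fields` at class creation (rendering and validation additionally require a configured Django project):

```python
from django import forms

class ContactForm(forms.Form):
    name = forms.CharField(max_length=100)
    email = forms.EmailField()

print(list(ContactForm.base_fields))  # ['name', 'email']
```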
  {
    "library": "matplotlib",
    "name": "set_center",
    "source_code": "def set_center(self, xy):\n    self._center = xy\n    self._path = None\n    self.stale = True",
    "docstring": "Set the center of the annulus. Parameters ---------- xy : (float, float)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_center arg:self arg:xy arguments arg arg Assign Assign Assign"
  },
  {
    "library": "scipy",
    "name": "http_manager",
    "source_code": "def http_manager():\n    proxy_dict = urllib.request.getproxies()\n    if 'http' in proxy_dict:\n        http = urllib3.ProxyManager(proxy_dict['http'])\n    elif 'all' in proxy_dict:\n        http = urllib3.ProxyManager(proxy_dict['all'])\n    else:\n        http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')\n    return http",
    "docstring": "Return a urllib3 http request manager, leveraging proxy settings when available.",
    "type": "function",
    "file_path": "scipy\\tools\\download-wheels.py",
    "ast_data": "FunctionDef name:http_manager arguments Assign Call If Compare Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "WarningStreamHandler",
    "source_code": "class WarningStreamHandler(logging.StreamHandler['SafeEncodingWriter']):\n    pass",
    "docstring": "StreamHandler for warnings.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:WarningStreamHandler"
  },
  {
    "library": "django",
    "name": "_get_latest_lastmod",
    "source_code": "def _get_latest_lastmod(current_lastmod, new_lastmod):\n    if not isinstance(new_lastmod, datetime.datetime):\n        new_lastmod = datetime.datetime.combine(new_lastmod, datetime.time.min)\n    if timezone.is_naive(new_lastmod):\n        new_lastmod = timezone.make_aware(new_lastmod, datetime.UTC)\n    return new_lastmod if current_lastmod is None else max(current_lastmod, new_lastmod)",
    "docstring": "Returns the latest where can be either a date or a datetime.",
    "type": "function",
    "file_path": "django\\django\\contrib\\sitemaps\\views.py",
    "ast_data": "FunctionDef name:_get_latest_lastmod arg:current_lastmod arg:new_lastmod arguments arg arg If Call Assign Call If Call Assign Call Return return:yes Compare Call"
  },
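A quick sketch of the date-vs-datetime handling above; the helper is private (import path from the listed file_path) and relies on `datetime.UTC` (Python 3.11+):

```python
import datetime
from django.contrib.sitemaps.views import _get_latest_lastmod  # private helper

date_only = datetime.date(2024, 1, 1)
dt = datetime.datetime(2024, 6, 1, 12, 0, tzinfo=datetime.UTC)

# The bare date is promoted to an aware datetime (midnight UTC) before comparison.
print(_get_latest_lastmod(dt, date_only))  # 2024-06-01 12:00:00+00:00
```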
  {
    "library": "scikit-learn",
    "name": "get_auto_step_size",
    "source_code": "def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept, n_samples=None, is_saga=False):\n    if loss in ('log', 'multinomial'):\n        L = 0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled\n    elif loss == 'squared':\n        L = max_squared_sum + int(fit_intercept) + alpha_scaled\n    else:\n        raise ValueError(\"Unknown loss function for SAG solver, got %s instead of 'log' or 'squared'\" % loss)\n    if is_saga:\n        mun = min(2 * n_samples * alpha_scaled, L)\n        step = 1.0 / (2 * L + mun)\n    else:\n        step = 1.0 / L\n    return step",
    "docstring": "Compute automatic step size for SAG solver. The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is the max sum of squares for over all samples. Parameters ---------- max_squared_sum : float Maximum squared sum of X over samples. alpha_scaled : float Constant that multiplies the regularization term, scaled by 1. / n_samples, the number of samples. loss : {'log', 'squared', 'multinomial'} The loss function used in SAG solver. fit_intercept : bool Specifies if a constant (a.k.a. bias or intercept) will be added to the decision function. n_samples : int, default=None Number of rows in X. Useful if is_saga=True. is_saga : bool, default=False Whether to return step size for the SAGA algorithm or the SAG algorithm. Returns ------- step_size : float Step size used in SAG solver. References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). Minimizing finite sums with the stochastic average gradient :arxiv:",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_sag.py",
    "ast_data": "FunctionDef name:get_auto_step_size arg:max_squared_sum arg:alpha_scaled arg:loss arg:fit_intercept arg:n_samples arg:is_saga arguments arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call Raise Call If Assign Call Assign Assign Return return:yes"
  },
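A usage sketch for the step-size helper above; `sklearn.linear_model._sag` is a private module, so the import is an internal detail that may change between releases:

```python
import numpy as np
from sklearn.linear_model._sag import get_auto_step_size  # private module

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
max_squared_sum = (X ** 2).sum(axis=1).max()  # max row-wise squared norm
step = get_auto_step_size(max_squared_sum, alpha_scaled=1e-4,
                          loss="log", fit_intercept=True,
                          n_samples=X.shape[0], is_saga=True)
print(step)  # SAGA step size: 1 / (2L + mun)
```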
  {
    "library": "tensorflow",
    "name": "_best_effort_input_batch_size",
    "source_code": "def _best_effort_input_batch_size(flat_input):\n    for input_ in flat_input:\n        shape = input_.shape\n        if shape.rank is None:\n            continue\n        if shape.rank < 2:\n            raise ValueError(f'Input tensor should have rank >= 2. Received input={input_} of rank {shape.rank}')\n        batch_size = shape.dims[1].value\n        if batch_size is not None:\n            return batch_size\n    return array_ops.shape(flat_input[0])[1]",
    "docstring": "Get static input batch size if available, with fallback to the dynamic one. Args: flat_input: An iterable of time major input Tensors of shape . All inputs should have compatible batch sizes. Returns: The batch size in Python integer if available, or a scalar Tensor otherwise. Raises: ValueError: if there is any input with an invalid shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_best_effort_input_batch_size arg:flat_input arguments arg For Assign If Compare If Compare Raise Call Assign If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_in_op_degree",
    "source_code": "def _in_op_degree(op):\n    count = 0\n    for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:\n        if not _is_loop_edge(op):\n            count += 1\n    return count",
    "docstring": "Returns the number of incoming edges to the given op. The edge calculation skips the edges that come from 'NextIteration' ops. NextIteration creates a cycle in the graph. We break cycles by treating this op as 'sink' and ignoring all outgoing edges from it. Args: op: Tf.Operation Returns: the number of incoming edges.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:_in_op_degree arg:op arguments arg Assign For If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "filter_dependency",
    "source_code": "def filter_dependency(self, line):\n    line = line.strip('\\n')\n    expr = '(?P<cfg>[\\\\S]+) (?P<cfg_spec>range\\\\([\\\\d\\\\.\\\\,\\\\s]+\\\\)( )?'\n    expr += '(include\\\\([\\\\d\\\\.\\\\,\\\\s]+\\\\))?( )?(exclude\\\\([\\\\d\\\\.\\\\,\\\\s]+\\\\))?( )?'\n    expr += '|[\\\\d\\\\,\\\\.\\\\s]+) requires (?P<cfgd>[\\\\S]+) (?P<cfgd_spec>range'\n    expr += '\\\\([\\\\d\\\\.\\\\,\\\\s]+\\\\)( )?(include\\\\([\\\\d\\\\.\\\\,\\\\s]+\\\\))?( )?'\n    expr += '(exclude\\\\([\\\\d\\\\.\\\\,\\\\s]+\\\\))?( )?|[\\\\d\\\\,\\\\.\\\\s]+)'\n    r = re.match(expr, line.strip('\\n'))\n    return r.groupdict()",
    "docstring": "Filters dependency compatibility rules defined in the config file. Dependency specifications are defined as the following: e.g. Args: line: String that is a dependency specification defined under section in the config file. Returns: Dict with configuration and its dependency information. e.g. {: , # configuration name : , # configuration version : , # dependency name : } # dependency version",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\compat_checker\\compat_checker.py",
    "ast_data": "FunctionDef name:filter_dependency arg:self arg:line arguments arg arg Assign Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "set_style",
    "source_code": "def set_style(style=None, rc=None):\n    style_object = axes_style(style, rc)\n    mpl.rcParams.update(style_object)",
    "docstring": "Set the parameters that control the general style of the plots. The style parameters control properties like the color of the background and whether a grid is enabled by default. This is accomplished using the matplotlib rcParams system. The options are illustrated in the :doc:. See :func: to get the parameter values. Parameters ---------- style : dict, or one of {darkgrid, whitegrid, dark, white, ticks} A dictionary of parameters or the name of a preconfigured style. rc : dict, optional Parameter mappings to override the values in the preset seaborn style dictionaries. This only updates parameters that are considered part of the style definition. Examples -------- .. include:: ../docstrings/set_style.rst",
    "type": "function",
    "file_path": "seaborn\\seaborn\\rcmod.py",
    "ast_data": "FunctionDef name:set_style arg:style arg:rc arguments arg arg Assign Call Call"
  },
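For illustration, applying a preset style with a style-scoped rc override:

```python
import seaborn as sns
import matplotlib.pyplot as plt

# Preset style plus an rc override; only style-definition parameters are updated.
sns.set_style("whitegrid", rc={"grid.linestyle": ":"})
plt.plot([0, 1, 2], [0, 1, 4])
plt.show()
```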
  {
    "library": "tensorflow",
    "name": "get_reverted_commit_hashes",
    "source_code": "def get_reverted_commit_hashes(message: str) -> list[str]:\n    print('Head commit message:', message, sep='\\n')\n    regex = re.compile('reverts ([0-9a-f]{5,40})', flags=re.IGNORECASE)\n    commit_hashes = regex.findall(message)\n    print(f'Found commit hashes reverted in this commit: {commit_hashes}')\n    return commit_hashes",
    "docstring": "Searches a commit message for and returns the found SHAs. Arguments: message: the commit message to search Returns: A list of SHAs as strings.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\.github\\workflows\\rollback_notification.py",
    "ast_data": "FunctionDef name:get_reverted_commit_hashes arg:message arguments arg Call Assign Call Assign Call Call Return return:yes"
  },
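Since the function above lives in a CI workflow script rather than an importable package, a sketch of the same regex behavior on a hypothetical commit message:

```python
import re

# Same pattern as the script: hex SHAs (5-40 chars) following the word "reverts".
regex = re.compile(r"reverts ([0-9a-f]{5,40})", flags=re.IGNORECASE)
message = "Rollback of earlier change.\n\nReverts abc123def456"  # hypothetical message
print(regex.findall(message))  # ['abc123def456']
```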
  {
    "library": "tensorflow",
    "name": "compute_gradients",
    "source_code": "def compute_gradients(self, loss, var_list=None, **kwargs):\n    num_shards = tpu_function.get_tpu_context().number_of_shards\n    if num_shards is None:\n        logging.warning('CrossShardOptimizer should be used within a tpu_shard_context, but got unset number_of_shards. Assuming 1.')\n        num_shards = 1\n    subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment, num_shards)\n    if num_shards > 1 and self._reduction == losses.Reduction.MEAN:\n        if self._group_assignment:\n            scale = 1.0 / subgroup_size\n        else:\n            scale = 1.0 / num_shards\n        loss *= scale\n    return self._opt.compute_gradients(loss, var_list=var_list, **kwargs)",
    "docstring": "Compute gradients of \"loss\" for the variables in \"var_list\". This simply wraps from the real optimizer. The gradients will be aggregated in so that user can modify the gradients like clipping with per replica global norm if needed. The global norm with aggregated gradients can be bad as one replica's huge gradients can hurt the gradients from other replicas. When the CrossShardOptimizer is constructed with (default), this function scales the loss by before computing the gradients. Assuming the optimizer uses the default implementation of , the gradients of the scaled loss are scaled by compared to the gradients of the original loss. This scaling factor is important because sums gradients across shards, rather than averaging them. However, the scaling factor must be taken into account when clipping the norm of the gradients or performing other postprocessing. Args: loss: A Tensor containing the value to minimize. var_list: Optional list or tuple of to update to minimize . Defaults to the list of variables collected in the graph under the key . **kwargs: Keyword arguments for compute_gradients(). Returns: A list of (gradient, variable) pairs. Raises: ValueError: If not within a tpu_shard_context or group_assignment is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py",
    "ast_data": "FunctionDef name:compute_gradients arg:self arg:loss arg:var_list arguments arg arg arg arg Assign Call If Compare Call Assign Assign Call If BoolOp Compare Compare If Assign Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "pie",
    "source_code": "def pie(self, y: IndexLabel | None=None, **kwargs) -> PlotAccessor:\n    if y is not None:\n        kwargs['y'] = y\n    if isinstance(self._parent, ABCDataFrame) and kwargs.get('y', None) is None and (not kwargs.get('subplots', False)):\n        raise ValueError(\"pie requires either y column or 'subplots=True'\")\n    return self(kind='pie', **kwargs)",
    "docstring": "Generate a pie plot. A pie plot is a proportional representation of the numerical data in a column. This function wraps :meth: for the specified column. If no column reference is passed and `DataFrame.plotsubplots` is True. See Also -------- Series.plot.pie : Generate a pie plot for a Series. DataFrame.plot : Make plots of a DataFrame. Examples -------- In the example below we have a DataFrame with the information about planet's mass and radius. We pass the 'mass' column to the pie function to get a pie plot. .. plot:: :context: close-figs >>> df = pd.DataFrame( ... {\"mass\": [0.330, 4.87, 5.97], \"radius\": [2439.7, 6051.8, 6378.1]}, ... index=[\"Mercury\", \"Venus\", \"Earth\"], ... ) >>> plot = df.plot.pie(y=\"mass\", figsize=(5, 5)) .. plot:: :context: close-figs >>> plot = df.plot.pie(subplots=True, figsize=(11, 6))",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_core.py",
    "ast_data": "FunctionDef name:pie arg:self arg:y arguments arg arg arg If Compare Assign If BoolOp Call Compare Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_py_torch_functions",
    "source_code": "def get_py_torch_functions(python_funcs: Sequence[PythonSignatureNativeFunctionPair], method: bool=False) -> Sequence[PythonSignatureGroup]:\n\n    def should_bind_function(python_func: PythonSignatureNativeFunctionPair) -> bool:\n        return should_generate_py_binding(python_func.function) and (not python_func.function.python_module) and (Variant.function in python_func.function.variants)\n\n    def should_bind_method(python_func: PythonSignatureNativeFunctionPair) -> bool:\n        return should_generate_py_binding(python_func.function) and (not python_func.function.python_module) and (Variant.method in python_func.function.variants)\n    should_bind = should_bind_method if method else should_bind_function\n    return group_overloads([f for f in python_funcs if should_bind(f)])",
    "docstring": "Get declarations (grouped by name) which should be generated as either functions in the \"torch\" module or methods on Tensor.",
    "type": "function",
    "file_path": "pytorch\\tools\\pyi\\gen_pyi.py",
    "ast_data": "FunctionDef name:get_py_torch_functions arg:python_funcs arg:method arguments arg arg FunctionDef name:should_bind_function arg:python_func arguments arg Return return:yes BoolOp Call Compare FunctionDef name:should_bind_method arg:python_func arguments arg Return return:yes BoolOp Call Compare Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "UnimplementedError",
    "source_code": "@tf_export('errors.UnimplementedError')\nclass UnimplementedError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(UnimplementedError, self).__init__(node_def, op, message, UNIMPLEMENTED, *args)",
    "docstring": "Raised when an operation has not been implemented. Some operations may raise this error when passed otherwise-valid arguments that it does not currently support. For example, running the operation would raise this error if pooling was requested on the batch dimension, because this is not yet supported.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:UnimplementedError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "reduction_collapse_dims",
    "source_code": "def reduction_collapse_dims(self, buffer, value: str, dtype: torch.dtype) -> str:\n    if self.num_reduction_dims == 1:\n        return value\n    target_ndim = self.triton_tensor_ndim() - self.num_reduction_dims\n    initial_shape = self.dense_size_list()\n    target_shape = initial_shape[:target_ndim] + ['RBLOCK']\n    return str(self.cse.generate(buffer, triton_reshape(value, initial_shape, target_shape), dtype=dtype))",
    "docstring": "Reshape to RBLOCK, collapsing all reduction dims.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "FunctionDef name:reduction_collapse_dims arg:self arg:buffer arg:value arg:dtype arguments arg arg arg arg If Compare Return return:yes Assign Call Assign Call Assign Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, K, y=None):\n    xp, _ = get_namespace(K)\n    K = validate_data(self, K, dtype=_array_api.supported_float_dtypes(xp))\n    if K.shape[0] != K.shape[1]:\n        raise ValueError('Kernel matrix must be a square matrix. Input is a {}x{} matrix.'.format(K.shape[0], K.shape[1]))\n    n_samples = K.shape[0]\n    self.K_fit_rows_ = xp.sum(K, axis=0) / n_samples\n    self.K_fit_all_ = xp.sum(self.K_fit_rows_) / n_samples\n    return self",
    "docstring": "Fit KernelCenterer. Parameters ---------- K : ndarray of shape (n_samples, n_samples) Kernel matrix. y : None Ignored. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:fit arg:self arg:K arg:y arguments arg arg arg Assign Call Assign Call Call If Compare Raise Call Call Assign Assign Call Assign Call Return return:yes"
  },
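A usage sketch for the fit method above: center a linear kernel so the implicit feature-space data has zero mean (after centering, row and column sums of the kernel are approximately zero):

```python
import numpy as np
from sklearn.preprocessing import KernelCenterer
from sklearn.metrics.pairwise import pairwise_kernels

X = np.array([[1.0, -2.0, 2.0], [-2.0, 1.0, 3.0], [4.0, 1.0, -2.0]])
K = pairwise_kernels(X, metric="linear")       # square kernel matrix
K_centered = KernelCenterer().fit(K).transform(K)
print(K_centered.sum(axis=0))  # ~[0, 0, 0]: centered in feature space
```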
  {
    "library": "tensorflow",
    "name": "wrapped_retrieve_collected_errors",
    "source_code": "def wrapped_retrieve_collected_errors():\n    return _pywrap_converter_api.RetrieveCollectedErrors()",
    "docstring": "Wraps RetrieveCollectedErrors with lazy loader.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\lite\\python\\wrap_converter.py",
    "ast_data": "FunctionDef name:wrapped_retrieve_collected_errors arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_default_global_save_plan",
    "source_code": "def create_default_global_save_plan(all_plans: list[SavePlan], rewrite_index_hints: bool=True) -> tuple[list[SavePlan], Metadata]:\n    md: dict[str, STORAGE_TYPES] = {}\n    new_plans = []\n    for plan in all_plans:\n        new_items = []\n        for item in plan.items:\n            if not item.type == WriteItemType.SHARD:\n                assert item.index.fqn not in md\n            if item.type == WriteItemType.BYTE_IO:\n                md[item.index.fqn] = BytesStorageMetadata()\n                new_items.append(item)\n            else:\n                assert item.tensor_data is not None\n                tensor_md = cast(TensorStorageMetadata, md.setdefault(item.index.fqn, TensorStorageMetadata(properties=item.tensor_data.properties, size=item.tensor_data.size, chunks=[])))\n                new_item = item\n                if rewrite_index_hints:\n                    new_index = dataclasses.replace(item.index, index=len(tensor_md.chunks))\n                    new_item = dataclasses.replace(item, index=new_index)\n                new_items.append(new_item)\n                assert item.tensor_data.chunk is not None, f'\\n                    Cannot create MD for tensor without bounds.\\n                    FQN: {item.index.fqn}\\n                '\n                tensor_md.chunks.append(item.tensor_data.chunk)\n        new_plans.append(dataclasses.replace(plan, items=new_items))\n    return (new_plans, Metadata(md))",
    "docstring": "Create the global plan and metadata used by DefaultSavePlanner. Metadata is produced by concatenating the metadata of all `` is True.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:create_default_global_save_plan arg:all_plans arg:rewrite_index_hints arguments arg arg Assign For Assign For If Compare Compare If Compare Assign Call Call Compare Assign Call Call Call Assign If Assign Call Call Assign Call Call Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "verify",
    "source_code": "@abc.abstractmethod\ndef verify(self, signature: bytes, data: bytes, padding: AsymmetricPadding, algorithm: asym_utils.Prehashed | hashes.HashAlgorithm) -> None:\n    pass",
    "docstring": "Verifies the signature of the data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py",
    "ast_data": "FunctionDef name:verify arg:self arg:signature arg:data arg:padding arg:algorithm arguments arg arg arg arg arg"
  },
  {
    "library": "matplotlib",
    "name": "on_key_release",
    "source_code": "def on_key_release(self, event):\n    if self.active:\n        key = event.key or ''\n        for state, modifier in self._state_modifier_keys.items():\n            if modifier in key.split('+') and state != 'rotate':\n                self._state.discard(state)\n        self._on_key_release(event)",
    "docstring": "Key release event handler and validator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:on_key_release arg:self arg:event arguments arg arg If Assign BoolOp For Call If BoolOp Compare Call Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "evaluate_sym_node",
    "source_code": "def evaluate_sym_node(self, sym_node: SymNode, size_oblivious: bool=False, fallback_value: Optional[bool]=None) -> sympy.Basic:\n    self._expr_sym_node_id = id(sym_node)\n    return self.evaluate_expr(sym_node.expr, sym_node.hint, sym_node.fx_node, size_oblivious, fallback_value=fallback_value)",
    "docstring": "Given a a SymNode, evaluates sym_node.expr, adding guards if necessary.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:evaluate_sym_node arg:self arg:sym_node arg:size_oblivious arg:fallback_value arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_yaxis",
    "source_code": "def get_yaxis(self):\n    return self.yaxis",
    "docstring": "[*Discouraged*] Return the YAxis instance. .. admonition:: Discouraged The use of this function is discouraged. You should instead directly access the attribute .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_yaxis arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_default",
    "source_code": "def get_default(self) -> Graph:\n    if self.stack:\n        return self.stack[-1]\n    elif self._global_default_graph:\n        return self._global_default_graph\n    else:\n        self._global_default_graph = Graph()\n        return self._global_default_graph",
    "docstring": "Override that returns a global default if the stack is empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:get_default arg:self arguments arg If Return return:yes If Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "@tf_should_use.should_use_result\ndef close(self, name=None):\n    return gen_data_flow_ops.tensor_array_close_v3(handle=self._handle, name=name)",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:close arg:self arg:name arguments arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_split_into",
    "source_code": "def _split_into(n: int, type: str, value: str) -> list[str]:\n    parts = [x.strip() for x in value.split(';', n - 1)]\n    if len(list(filter(None, parts))) < n:\n        msg = f'invalid {type} index entry {value!r}'\n        raise ValueError(msg)\n    return parts",
    "docstring": "Split an index entry into a given number of parts at semicolons.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\index_entries.py",
    "ast_data": "FunctionDef name:_split_into arg:n arg:type arg:value arguments arg arg arg Assign Call Call If Compare Call Call Call Assign Raise Call Return return:yes"
  },
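A quick sketch of the splitting behavior above; the helper is private to Sphinx, so the import path (from the listed file_path) is an internal detail:

```python
from sphinx.util.index_entries import _split_into  # private helper

print(_split_into(2, "pair", "module; function"))  # ['module', 'function']
# Too few parts raises: _split_into(2, "pair", "module") -> ValueError
```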
  {
    "library": "matplotlib",
    "name": "FuncNorm",
    "source_code": "@make_norm_from_scale(scale.FuncScale, init=lambda functions, vmin=None, vmax=None, clip=False: None)\nclass FuncNorm(Normalize):\n    pass",
    "docstring": "Arbitrary normalization using functions for the forward and inverse. Parameters ---------- functions : (callable, callable) two-tuple of the forward and inverse functions for the normalization. The forward function must be monotonic. Both functions must have the signature :: def forward(values: array-like) -> array-like vmin, vmax : float or None If *vmin* and/or *vmax* is not given, they are initialized from the minimum and maximum value, respectively, of the first input processed; i.e., ``. This behavior is usually desirable, as colormaps can mark these *under* and *over* values with specific colors. If clipping is on, values below *vmin* are mapped to 0 and values above *vmax* are mapped to 1. Such values become indistinguishable from regular boundary values, which may cause misinterpretation of the data.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "ClassDef name:FuncNorm Call arguments arg arg arg arg"
  },
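For illustration, a square-root stretch built from the class above; the forward function compresses high values, and np.square is its inverse:

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import FuncNorm

norm = FuncNorm((np.sqrt, np.square), vmin=0, vmax=100)
data = np.random.default_rng(0).random((8, 8)) * 100
plt.imshow(data, norm=norm)  # low values get more of the colormap's range
plt.colorbar()
plt.show()
```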
  {
    "library": "django",
    "name": "start_object",
    "source_code": "def start_object(self, obj):\n    if not hasattr(obj, '_meta'):\n        raise base.SerializationError('Non-model object (%s) encountered during serialization' % type(obj))\n    self.indent(1)\n    attrs = {'model': str(obj._meta)}\n    if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):\n        obj_pk = obj.pk\n        if obj_pk is not None:\n            attrs['pk'] = obj._meta.pk.value_to_string(obj)\n    self.xml.startElement('object', attrs)",
    "docstring": "Called as each object is handled.",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:start_object arg:self arg:obj arguments arg arg If Call Raise Call Call Call Assign Call If BoolOp Call Assign If Compare Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "rvs",
    "source_code": "def rvs(self, dim, size=None, random_state=None):\n    random_state = self._get_random_state(random_state)\n    if size is None:\n        size = np.array([], dtype=int)\n    size = np.atleast_1d(size)\n    dim = self._process_parameters(dim)\n    samples = _sample_uniform_direction(dim, size, random_state)\n    return samples",
    "docstring": "Draw random samples from S(N-1). Parameters ---------- dim : integer Dimension of space (N). size : int or tuple of ints, optional Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). If no shape is specified, a single (N-D) sample is returned. random_state : {None, int, , }, optional Pseudorandom number generator state used to generate resamples. If is `np.randomnumpy.random.RandomStaterandom_staterandom_staterandom_state` instance then that instance is used. Returns ------- rvs : ndarray Random direction vectors",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:rvs arg:self arg:dim arg:size arg:random_state arguments arg arg arg arg Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_deterministic_vector_sign_flip",
    "source_code": "def _deterministic_vector_sign_flip(u):\n    max_abs_rows = np.argmax(np.abs(u), axis=1)\n    signs = np.sign(u[range(u.shape[0]), max_abs_rows])\n    u *= signs[:, np.newaxis]\n    return u",
    "docstring": "Modify the sign of vectors for reproducibility. Flips the sign of elements of all the vectors (rows of u) such that the absolute maximum element of each vector is positive. Parameters ---------- u : ndarray Array with vectors as its rows. Returns ------- u_flipped : ndarray with same shape as u Array with the sign flipped vectors as its rows.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\extmath.py",
    "ast_data": "FunctionDef name:_deterministic_vector_sign_flip arg:u arguments arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_spatialite_func",
    "source_code": "def _get_spatialite_func(self, func):\n    cursor = self.connection._cursor()\n    try:\n        cursor.execute('SELECT %s' % func)\n        row = cursor.fetchone()\n    finally:\n        cursor.close()\n    return row[0]",
    "docstring": "Helper routine for calling SpatiaLite functions and returning their result. Any error occurring in this method should be handled by the caller.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py",
    "ast_data": "FunctionDef name:_get_spatialite_func arg:self arg:func arguments arg arg Assign Call Try Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_field",
    "source_code": "def get_field(proto: message.Message, fields: FieldTypes) -> tuple[Any, Optional[descriptor.FieldDescriptor]]:\n    field_proto = proto\n    field_desc = None\n    for field_proto, field_desc, _, _ in _walk_fields(proto, fields):\n        pass\n    return (field_proto, field_desc)",
    "docstring": "Returns the field and field descriptor from the proto. Args: proto: Parent proto of any message type. fields: List of string/int/map key fields, e.g. [\"nodes\", \"attr\", \"value\"] can represent . Returns: Tuple of ( Field in the proto or if none are found, Field descriptor )",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\util.py",
    "ast_data": "FunctionDef name:get_field arg:proto arg:fields arguments arg arg Assign Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_debug_info_func",
    "source_code": "def convert_debug_info_func(saved_debug_info):\n\n    def f(original_nodes):\n        del original_nodes\n        return saved_debug_info\n    return f",
    "docstring": "Returns a method to retrieve the from the original graph. Args: saved_debug_info: The containing all the debug info. Returns: A function which retrieves the stack traces from the original graph and converts them to a for a given set of nodes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:convert_debug_info_func arg:saved_debug_info arguments arg FunctionDef name:f arg:original_nodes arguments arg Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "BivariateSpline",
    "source_code": "class BivariateSpline(Benchmark):\n    param_names = ['n_samples']\n    params = [[10, 20, 30]]\n\n    def setup(self, n_samples):\n        x = np.arange(0, n_samples, 0.5)\n        y = np.arange(0, n_samples, 0.5)\n        x, y = np.meshgrid(x, y)\n        x = x.ravel()\n        y = y.ravel()\n        xmin = x.min() - 1\n        xmax = x.max() + 1\n        ymin = y.min() - 1\n        ymax = y.max() + 1\n        s = 1.1\n        self.yknots = np.linspace(ymin + s, ymax - s, 10)\n        self.xknots = np.linspace(xmin + s, xmax - s, 10)\n        self.z = np.sin(x) + 0.1 * np.random.normal(size=x.shape)\n        self.x = x\n        self.y = y\n\n    def time_smooth_bivariate_spline(self, n_samples):\n        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)\n\n    def time_lsq_bivariate_spline(self, n_samples):\n        interpolate.LSQBivariateSpline(self.x, self.y, self.z, self.xknots.flat, self.yknots.flat)",
    "docstring": "Author: josef-pktd and scipy mailinglist example ' -and-my-crashing-python-td14801.html'",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:BivariateSpline Assign Assign FunctionDef name:setup arg:self arg:n_samples arguments arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call Assign Assign FunctionDef name:time_smooth_bivariate_spline arg:self arg:n_samples arguments arg arg Call FunctionDef name:time_lsq_bivariate_spline arg:self arg:n_samples arguments arg arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_calc_depthwise_conv_flops",
    "source_code": "@ops.RegisterStatistics('DepthwiseConv2dNative', 'flops')\ndef _calc_depthwise_conv_flops(graph, node):\n    input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    input_shape.assert_is_fully_defined()\n    filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n    filter_shape.assert_is_fully_defined()\n    output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n    output_shape.assert_is_fully_defined()\n    filter_height = int(filter_shape[0])\n    filter_width = int(filter_shape[1])\n    output_count = np.prod(output_shape.as_list(), dtype=np.int64)\n    return ops.OpStats('flops', output_count * filter_height * filter_width * 2)",
    "docstring": "Calculates the compute resources needed for DepthwiseConv2dNative.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:_calc_depthwise_conv_flops arg:graph arg:node arguments arg arg Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "unsupported_output_tensor",
    "source_code": "def unsupported_output_tensor(t: torch.Tensor, parent=None, node=None):\n    if unsupported_input_tensor(t, parent):\n        return True\n    return t.is_cpu and config.disable_cpp_codegen",
    "docstring": "Do not support writing tensor but can read from it",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:unsupported_output_tensor arg:t arg:parent arg:node arguments arg arg arg If Call Return return:yes Return return:yes BoolOp"
  },
  {
    "library": "django",
    "name": "get_queryset",
    "source_code": "def get_queryset(self, request):\n    qs = self.model._default_manager.get_queryset()\n    ordering = self.get_ordering(request)\n    if ordering:\n        qs = qs.order_by(*ordering)\n    return qs",
    "docstring": "Return a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:get_queryset arg:self arg:request arguments arg arg Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, grpc_debug_server_addresses, watch_fn=None, thread_name_filter=None):\n    self._grpc_debug_wrapper_session = None\n    self._thread_name_filter = thread_name_filter\n    self._grpc_debug_server_addresses = grpc_debug_server_addresses if isinstance(grpc_debug_server_addresses, list) else [grpc_debug_server_addresses]\n    self._watch_fn = watch_fn",
    "docstring": "Constructs a GrpcDebugHook. Args: grpc_debug_server_addresses: ( of ) A list of the gRPC debug server addresses, in the format of , with or without the \"grpc://\" prefix. For example: [\"localhost:7000\", \"192.168.0.2:8000\"] watch_fn: A function that allows for customizing which ops to watch at which specific steps. See doc of for details. thread_name_filter: Regular-expression white list for threads on which the wrapper session will be active. See doc of for more details.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:grpc_debug_server_addresses arg:watch_fn arg:thread_name_filter arguments arg arg arg arg Assign Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "ask_to_proceed_with_overwrite",
    "source_code": "def ask_to_proceed_with_overwrite(filepath):\n    overwrite = input('[WARNING] %s already exists - overwrite? [y/n]' % filepath).strip().lower()\n    while overwrite not in ('y', 'n'):\n        overwrite = input('Enter \"y\" (overwrite) or \"n\" (cancel).').strip().lower()\n    if overwrite == 'n':\n        return False\n    print('[TIP] Next time specify overwrite=True!')\n    return True",
    "docstring": "Produces a prompt asking about overwriting a file. Args: filepath: the path to the file to be overwritten. Returns: True if we can proceed with overwrite, False otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\io_utils.py",
    "ast_data": "FunctionDef name:ask_to_proceed_with_overwrite arg:filepath arguments arg Assign Call Call Call While Compare Assign Call Call Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "VolumeRenderer",
    "source_code": "class VolumeRenderer(torch.nn.Module):\n    _huge = 10000000000.0\n    _eps = 1e-10\n\n    def __init__(self, shift: int=1) -> None:\n        super().__init__()\n        self._shift = shift\n\n    def _render(self, alpha: Tensor, rgbs: Tensor) -> Tensor:\n        trans = torch.cumprod(1 - alpha + self._eps, dim=-2)\n        trans = torch.roll(trans, shifts=self._shift, dims=-2)\n        trans[..., :self._shift, :] = 1\n        weights = trans * alpha\n        rgbs_rendered = torch.sum(weights * rgbs, dim=-2)\n        return rgbs_rendered\n\n    def forward(self, rgbs: Tensor, densities: Tensor, points_3d: Tensor) -> Tensor:\n        raise NotImplementedError",
    "docstring": "Base class for volume rendering. Implementation follows Ben Mildenhall et el. (2020) at",
    "type": "class",
    "file_path": "kornia\\kornia\\nerf\\volume_renderer.py",
    "ast_data": "ClassDef name:VolumeRenderer Assign Assign FunctionDef name:__init__ arg:self arg:shift arguments arg arg Call Call Assign FunctionDef name:_render arg:self arg:alpha arg:rgbs arguments arg arg arg Assign Call Assign Call Assign Assign Assign Call Return return:yes FunctionDef name:forward arg:self arg:rgbs arg:densities arg:points_3d arguments arg arg arg arg Raise"
  },
  {
    "library": "django",
    "name": "log_response",
    "source_code": "def log_response(message, *args, response=None, request=None, logger=request_logger, level=None, exception=None):\n    if getattr(response, '_has_been_logged', False):\n        return\n    if level is None:\n        if response.status_code >= 500:\n            level = 'error'\n        elif response.status_code >= 400:\n            level = 'warning'\n        else:\n            level = 'info'\n    getattr(logger, level)(message, *args, extra={'status_code': response.status_code, 'request': request}, exc_info=exception)\n    response._has_been_logged = True",
    "docstring": "Log errors based on HttpResponse status. Log 5xx responses as errors and 4xx responses as warnings (unless a level is given as a keyword argument). The HttpResponse status_code and the request are passed to the logger's extra parameter.",
    "type": "function",
    "file_path": "django\\django\\utils\\log.py",
    "ast_data": "FunctionDef name:log_response arg:message arguments arg arg arg arg arg arg arg If Call Return return:no If Compare If Compare Assign If Compare Assign Assign Call Call Assign"
  },
  {
    "library": "seaborn",
    "name": "FontSize",
    "source_code": "class FontSize(IntervalProperty):\n    _legend = False\n\n    @property\n    def default_range(self) -> tuple[float, float]:\n        base = mpl.rcParams['font.size']\n        return (base * 0.5, base * 2)",
    "docstring": "Font size for textual marks, in points.",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "ClassDef name:FontSize Assign FunctionDef name:default_range arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nanany",
    "source_code": "def nanany(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> bool:\n    if values.dtype.kind in 'iub' and mask is None:\n        return values.any(axis)\n    if values.dtype.kind == 'M':\n        raise TypeError(\"datetime64 type does not support operation 'any'\")\n    values, _ = _get_values(values, skipna, fill_value=False, mask=mask)\n    if values.dtype == object:\n        values = values.astype(bool)\n    return values.any(axis)",
    "docstring": "Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s.values) np.True_ >>> from pandas.core import nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s.values) np.False_",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:nanany arg:values arguments arg arg arg arg If BoolOp Compare Compare Return return:yes Call If Compare Raise Call Assign Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "matrix_transpose",
    "source_code": "@tf_export('linalg.matrix_transpose', v1=['linalg.transpose', 'linalg.matrix_transpose', 'matrix_transpose'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('matrix_transpose', 'linalg.transpose')\ndef matrix_transpose(a, name='matrix_transpose', conjugate=False):\n    with ops.name_scope(name, values=[a]):\n        a = ops.convert_to_tensor(a, name='a')\n        a_shape = a.get_shape()\n        ndims = a_shape.ndims\n        if ndims is not None:\n            if ndims < 2:\n                raise ValueError(f'Argument `a` should be a (batch) matrix with rank >= 2.  Received `a` = {a} with shape: {a_shape}')\n            perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]\n        else:\n            a_rank = rank(a)\n            perm = concat((gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)\n        return transpose(a, perm=perm, conjugate=conjugate)",
    "docstring": "Transposes last two dimensions of tensor . For example: Note that provides kwargs allowing for transpose of arguments. This is done with minimal cost, and is preferable to using this function. E.g. @compatibility(numpy) In transposes are memory-efficient constant time operations as they simply return a new view of the same data with adjusted . TensorFlow does not support strides, returns a new tensor with the items permuted. @end_compatibility Args: a: A with . name: A name for the operation (optional). conjugate: Optional bool. Setting it to is mathematically equivalent to tf.math.conj(tf.linalg.matrix_transpose(input)). Returns: A transposed batch matrix . Raises: ValueError: If is determined statically to have .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:matrix_transpose arg:a arg:name arg:conjugate arguments arg arg arg With Call Assign Call Assign Call Assign If Compare If Compare Raise Call Assign Call Call Assign Call Assign Call Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_save_tensor_value_to_cache_op",
    "source_code": "def _save_tensor_value_to_cache_op(self, cache_idx, updates, graph):\n    updates = self._merge_tensor_signatures(updates)\n    updates = array_ops.reshape(updates, [1, self._num_signature_dimensions()])\n    indices = constant_op.constant([cache_idx])\n    cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG, graph)\n    return state_ops.scatter_update(cache, indices, updates).op",
    "docstring": "Returns an op that will save the given updates to an entry in the cache. Args: cache_idx: The cache index of the tensor within the cache. updates: A dictionary of the signature updates. graph: A TensorFlow graph. Returns: Cache update operation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_save_tensor_value_to_cache_op arg:self arg:cache_idx arg:updates arg:graph arguments arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, name, distribution_fn, required_gpus=None, required_physical_gpus=0, required_tpu=False, use_cloud_tpu=False, has_chief=False, num_workers=1, num_ps=0, share_gpu=True, pool_runner_fn=None, no_xla=False):\n    object.__init__(self)\n    self._name = name\n    self._distribution_fn = distribution_fn\n    self.required_gpus = required_gpus\n    self.required_physical_gpus = required_physical_gpus\n    self.required_tpu = required_tpu\n    self.use_cloud_tpu = use_cloud_tpu\n    self.has_chief = has_chief\n    self.num_workers = num_workers\n    self.num_ps = num_ps\n    self.share_gpu = share_gpu\n    self._pool_runner_fn = pool_runner_fn\n    self.no_xla = no_xla",
    "docstring": "Initialize NamedDistribution. Args: name: Name that will be a part of the name of the test case. distribution_fn: A callable that creates a . required_gpus: The number of GPUs that the strategy requires. Only one of and should be set. required_physical_gpus: Number of physical GPUs required. Only one of and should be set. required_tpu: Whether the strategy requires TPU. use_cloud_tpu: Whether the strategy requires cloud TPU. has_chief: Whether the strategy requires a chief worker. num_workers: The number of workers that the strategy requires. num_ps: The number of parameter servers. share_gpu: Whether to share GPUs among workers. pool_runner_fn: An optional callable that returns a MultiProcessPoolRunner to run the test. no_xla: Whether to skip in XLA tests.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:name arg:distribution_fn arg:required_gpus arg:required_physical_gpus arg:required_tpu arg:use_cloud_tpu arg:has_chief arg:num_workers arg:num_ps arg:share_gpu arg:pool_runner_fn arg:no_xla arguments arg arg arg arg arg arg arg arg arg arg arg arg arg Call Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "cardinality",
    "source_code": "@tf_export('data.experimental.cardinality')\ndef cardinality(dataset):\n    return gen_dataset_ops.dataset_cardinality(dataset._variant_tensor)",
    "docstring": "Returns the cardinality of , if known. The operation returns the cardinality of . The operation may return if contains an infinite number of elements or if the analysis fails to determine the number of elements in (e.g. when the dataset source is a file). >>> dataset = tf.data.Dataset.range(42) >>> print(tf.data.experimental.cardinality(dataset).numpy()) 42 >>> dataset = dataset.repeat() >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.INFINITE_CARDINALITY).numpy()) True >>> dataset = dataset.filter(lambda x: True) >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) True Args: dataset: A for which to determine cardinality. Returns: A scalar representing the cardinality of . If the cardinality is infinite or unknown, the operation returns the named constant and respectively.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\cardinality.py",
    "ast_data": "FunctionDef name:cardinality arg:dataset arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "slice_batch_indices",
    "source_code": "def slice_batch_indices(indices):\n    num_in_full_batch = num_full_batches * batch_size\n    first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])\n    first_k_indices = array_ops.reshape(first_k_indices, [num_full_batches, batch_size])\n    flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)\n    if self._partial_batch_size:\n        index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(indices, [num_in_full_batch], [self._partial_batch_size]))\n        flat_dataset = flat_dataset.concatenate(index_remainder)\n    if shuffle == 'batch':\n        flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)\n    return flat_dataset",
    "docstring": "Convert a Tensor of indices into a dataset of batched indices. This step can be accomplished in several ways. The most natural is to slice the Tensor in a Dataset map. (With a condition on the upper index to handle the partial batch.) However it turns out that coercing the Tensor into a shape which is divisible by the batch size (and handling the last partial batch separately) allows for a much more favorable memory access pattern and improved performance. Args: indices: Tensor which determines the data order for an entire epoch. Returns: A Dataset of batched indices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:slice_batch_indices arg:indices arguments arg Assign Assign Call Assign Call Assign Call If Assign Call Call Assign Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "Cursors",
    "source_code": "class Cursors(enum.IntEnum):\n    POINTER = enum.auto()\n    HAND = enum.auto()\n    SELECT_REGION = enum.auto()\n    MOVE = enum.auto()\n    WAIT = enum.auto()\n    RESIZE_HORIZONTAL = enum.auto()\n    RESIZE_VERTICAL = enum.auto()",
    "docstring": "Backend-independent cursor types.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:Cursors Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "BatchNorm2d",
    "source_code": "class BatchNorm2d(_BatchNorm):\n    _NNI_BN_RELU_MODULE = nni.BNReLU2d\n\n    def __init__(self, num_features, eps=1e-05, momentum=0.1, device=None, dtype=None) -> None:\n        factory_kwargs = {'device': device, 'dtype': dtype}\n        super().__init__(num_features, eps, momentum, **factory_kwargs)\n\n    def _get_name(self):\n        return 'QuantizedBatchNorm2d'\n\n    def _check_input_dim(self, input):\n        if len(input.shape) != 4:\n            raise ValueError('Input shape must be `(N, C, H, W)`!')\n\n    def forward(self, input: torch.Tensor) -> torch.Tensor:\n        return torch.ops.quantized.batch_norm2d(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.scale, self.zero_point)\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return _BatchNorm.from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "This is the quantized version of :class:.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\batchnorm.py",
    "ast_data": "ClassDef name:BatchNorm2d Assign FunctionDef name:__init__ arg:self arg:num_features arg:eps arg:momentum arg:device arg:dtype arguments arg arg arg arg arg arg Assign Call Call FunctionDef name:_get_name arg:self arguments arg Return return:yes FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "lazy_property",
    "source_code": "class lazy_property(Generic[T, R]):\n\n    def __init__(self, wrapped: Callable[[T], R]) -> None:\n        self.wrapped: Callable[[T], R] = wrapped\n        update_wrapper(self, wrapped)\n\n    @overload\n    def __get__(self, instance: None, obj_type: Any=None) -> '_lazy_property_and_property[T, R]':\n        ...\n\n    @overload\n    def __get__(self, instance: T, obj_type: Any=None) -> R:\n        ...\n\n    def __get__(self, instance: Union[T, None], obj_type: Any=None) -> 'R | _lazy_property_and_property[T, R]':\n        if instance is None:\n            return _lazy_property_and_property(self.wrapped)\n        with torch.enable_grad():\n            value = self.wrapped(instance)\n        setattr(instance, self.wrapped.__name__, value)\n        return value",
    "docstring": "Used as a decorator for lazy loading of class attributes. This uses a non-data descriptor that calls the wrapped method to compute the property on first call; thereafter replacing the wrapped method into an instance attribute.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "ClassDef name:lazy_property FunctionDef name:__init__ arg:self arg:wrapped arguments arg arg Call FunctionDef name:__get__ arg:self arg:instance arg:obj_type arguments arg arg arg FunctionDef name:__get__ arg:self arg:instance arg:obj_type arguments arg arg arg FunctionDef name:__get__ arg:self arg:instance arg:obj_type arguments arg arg arg If Compare Return return:yes Call With Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_shard_metadata_pair_overlap",
    "source_code": "def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):\n    ndims = len(shard1.shard_offsets)\n    for i in range(ndims):\n        if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_sizes[i]:\n            return False\n        if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_sizes[i]:\n            return False\n    return True",
    "docstring": "Checks if two shards overlap.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py",
    "ast_data": "FunctionDef name:_check_shard_metadata_pair_overlap arg:shard1 arg:shard2 arguments arg arg Assign Call For Call If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_scalar_batch",
    "source_code": "def is_scalar_batch(self, name='is_scalar_batch'):\n    with self._name_scope(name):\n        return ops.convert_to_tensor(self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), name='is_scalar_batch')",
    "docstring": "Indicates that . Args: name: Python prepended to names of ops created by this function. Returns: is_scalar_batch: scalar .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:is_scalar_batch arg:self arg:name arguments arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "merge_source_suffix",
    "source_code": "def merge_source_suffix(app: Sphinx, config: Config) -> None:\n    for suffix, filetype in app.registry.source_suffix.items():\n        if suffix not in app.config.source_suffix:\n            app.config.source_suffix[suffix] = filetype\n        elif app.config.source_suffix[suffix] == 'restructuredtext':\n            app.config.source_suffix[suffix] = filetype\n        elif app.config.source_suffix[suffix] is None:\n            msg = __('`None` is not a valid filetype for %r.') % suffix\n            logger.warning(msg)\n            app.config.source_suffix[suffix] = filetype\n    app.registry.source_suffix = app.config.source_suffix",
    "docstring": "Merge any user-specified source_suffix with any added by extensions.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\registry.py",
    "ast_data": "FunctionDef name:merge_source_suffix arg:app arg:config arguments arg arg For Call If Compare Assign If Compare Assign If Compare Assign Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "tpu_wrap_trace_fn",
    "source_code": "def tpu_wrap_trace_fn(tensor, out_tensor_name):\n    tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name, tensor_trace_order)\n    if on_tpu:\n        return tpu_replication.outside_compilation(tensor_trace_fn, tensor)\n    else:\n        return tensor_trace_fn(tensor)",
    "docstring": "Wraps the trace_fn with outside compilation if on TPUs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:tpu_wrap_trace_fn arg:tensor arg:out_tensor_name arguments arg arg Assign Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_colors_from_color",
    "source_code": "def _get_colors_from_color(color: Color | Collection[Color]) -> list[Color]:\n    if len(color) == 0:\n        raise ValueError(f'Invalid color argument: {color}')\n    if _is_single_color(color):\n        color = cast(Color, color)\n        return [color]\n    color = cast(Collection[Color], color)\n    return list(_gen_list_of_colors_from_iterable(color))",
    "docstring": "Get colors from user input color.",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\style.py",
    "ast_data": "FunctionDef name:_get_colors_from_color arg:color arguments arg If Compare Call Raise Call If Call Assign Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_campos_zeros",
    "source_code": "def _campos_zeros(n):\n    if n == 1:\n        return asarray([-1 + 0j])\n    s = npp_polyval(n, [0, 0, 2, 0, -3, 1])\n    b3 = npp_polyval(n, [16, -8]) / s\n    b2 = npp_polyval(n, [-24, -12, 12]) / s\n    b1 = npp_polyval(n, [8, 24, -12, -2]) / s\n    b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s\n    r = npp_polyval(n, [0, 0, 2, 1])\n    a1 = npp_polyval(n, [-6, -6]) / r\n    a2 = 6 / r\n    k = np.arange(1, n + 1)\n    x = npp_polyval(k, [0, a1, a2])\n    y = npp_polyval(k, [b0, b1, b2, b3])\n    return x + 1j * y",
    "docstring": "Return approximate zero locations of Bessel polynomials y_n(x) for order using polynomial fit (Campos-Calderon 2011)",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_campos_zeros arg:n arguments arg If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x):\n    return np.exp(self.logpdf(x))",
    "docstring": "Parameters ---------- x : array_like Points at which to evaluate the log of the probability density function. The last axis of must correspond to unit vectors of the same dimensionality as the distribution. Returns ------- pdf : ndarray or scalar Probability density function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "uses_star_args_or_kwargs_in_call",
    "source_code": "def uses_star_args_or_kwargs_in_call(node):\n    return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)",
    "docstring": "Check if an ast.Call node uses arbitrary-length *args or **kwargs. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "FunctionDef name:uses_star_args_or_kwargs_in_call arg:node arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "matplotlib",
    "name": "update_from_data_xy",
    "source_code": "def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):\n    if len(xy) == 0:\n        return\n    path = Path(xy)\n    self.update_from_path(path, ignore=ignore, updatex=updatex, updatey=updatey)",
    "docstring": "Update the bounds based on the passed in *xy* coordinates. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. Parameters ---------- xy : (N, 2) array-like The (x, y) coordinates. ignore : bool, optional - When `BboxBboxignore`, update the x/y values.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:update_from_data_xy arg:self arg:xy arg:ignore arg:updatex arg:updatey arguments arg arg arg arg arg If Compare Call Return return:no Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "sqrt",
    "source_code": "@tf_export('math.sqrt', 'sqrt')\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef sqrt(x, name=None):\n    return gen_math_ops.sqrt(x, name)",
    "docstring": "Computes element-wise square root of the input tensor. Note: This operation does not support integer types. >>> x = tf.constant([[4.0], [16.0]]) >>> tf.sqrt(x) >>> y = tf.constant([[-4.0], [16.0]]) >>> tf.sqrt(y) >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128) >>> tf.sqrt(z) Note: In order to support complex type, please provide an input tensor of or . Args: x: A of type , , , , , name: A name for the operation (optional). Returns: A of same size, type and sparsity as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:sqrt arg:x arg:name arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "_write_to_transport",
    "source_code": "def _write_to_transport(self) -> None:\n    assert self.transport is not None\n    self.resetTimeout()\n    data = self.conn.data_to_send()\n    self.transport.write(data)",
    "docstring": "Write data to the underlying transport connection from the HTTP2 connection instance if any",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py",
    "ast_data": "FunctionDef name:_write_to_transport arg:self arguments arg Compare Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "gibbs",
    "source_code": "def gibbs(self, v):\n    check_is_fitted(self)\n    if not hasattr(self, 'random_state_'):\n        self.random_state_ = check_random_state(self.random_state)\n    h_ = self._sample_hiddens(v, self.random_state_)\n    v_ = self._sample_visibles(h_, self.random_state_)\n    return v_",
    "docstring": "Perform one Gibbs sampling step. Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py",
    "ast_data": "FunctionDef name:gibbs arg:self arg:v arguments arg arg Call If Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_pipelined_all_gather_and_consume",
    "source_code": "def _pipelined_all_gather_and_consume(shard: torch.Tensor, shard_consumer: Callable[[torch.Tensor, int], None], ag_out: torch.Tensor, group_name: str, ag_out_needed: bool=True) -> None:\n\n    def adapter(shard: list[torch.Tensor], rank: int) -> None:\n        shard_consumer(shard[0], rank)\n    _pipelined_multi_all_gather_and_consume([shard], adapter, [ag_out], group_name, ag_out_needed)",
    "docstring": "Perform the following logic with micro-pipelined computation and communication: ag_out = all_gather_tensor(shard, gather_dim=0, group=group) shards = ag_out.chunk(group.size()) for src_rank, shard in enumerate(shards): shard_consumer(shard, src_rank)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:_pipelined_all_gather_and_consume arg:shard arg:shard_consumer arg:ag_out arg:group_name arg:ag_out_needed arguments arg arg arg arg arg FunctionDef name:adapter arg:shard arg:rank arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_initialize_permutations",
    "source_code": "def _initialize_permutations(self) -> None:\n    self._permutations: list = [None] * len(self.base)\n    if self.scramble:\n        for i, bdim in enumerate(self.base):\n            permutations = _van_der_corput_permutations(base=bdim, rng=self.rng)\n            self._permutations[i] = permutations",
    "docstring": "Initialize permutations for all Van der Corput sequences. Permutations are only needed for scrambling.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:_initialize_permutations arg:self arguments arg Call If For Call Assign Call Assign"
  },
  {
    "library": "sphinx",
    "name": "writesep",
    "source_code": "def writesep(char: str='-', lineno: int | None=None) -> str:\n    out: list[str] = []\n    for colno, width in enumerate(self.measured_widths):\n        if lineno is not None and lineno > 0 and (self[lineno, colno] is self[lineno - 1, colno]):\n            out.append(' ' * (width + 2))\n        else:\n            out.append(char * (width + 2))\n    head = '+' if out[0][0] == '-' else '|'\n    tail = '+' if out[-1][0] == '-' else '|'\n    glue = ['+' if left[0] == '-' or right[0] == '-' else '|' for left, right in pairwise(out)]\n    glue.append(tail)\n    return head + ''.join(chain.from_iterable(zip(out, glue, strict=False)))",
    "docstring": "Called on the line *before* lineno. Called with no *lineno* for the last sep.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:writesep arg:char arg:lineno arguments arg arg For Call If BoolOp Compare Compare Compare Call Call Assign Compare Assign Compare Assign BoolOp Compare Compare Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "BartelsConn",
    "source_code": "class BartelsConn(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 1.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return abs(x[0] ** 2.0 + x[1] ** 2.0 + x[0] * x[1]) + abs(sin(x[0])) + abs(cos(x[1]))",
    "docstring": "Bartels-Conn objective function. The BartelsConn [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{BartelsConn}}(x) = \\lvert {x_1^2 + x_2^2 + x_1x_2} \\rvert + \\lvert {\\sin(x_1)} \\rvert + \\lvert {\\cos(x_2)} \\rvert with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:BartelsConn FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "json_handler",
    "source_code": "def json_handler(*args, **kwargs):\n    value = cherrypy.serving.request._json_inner_handler(*args, **kwargs)\n    return json.encode(value)",
    "docstring": "Convert decorated HTTP handler-returned object to JSON string.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\jsontools.py",
    "ast_data": "FunctionDef name:json_handler arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_VariantDataset",
    "source_code": "class _VariantDataset(DatasetV2):\n\n    def __init__(self, dataset_variant, element_spec):\n        self._element_spec = element_spec\n        super(_VariantDataset, self).__init__(dataset_variant)\n\n    def _inputs(self):\n        return []\n\n    @property\n    def element_spec(self):\n        return self._element_spec",
    "docstring": "A Dataset wrapper around a -typed function argument.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "ClassDef name:_VariantDataset FunctionDef name:__init__ arg:self arg:dataset_variant arg:element_spec arguments arg arg arg Assign Call Call FunctionDef name:_inputs arg:self arguments arg Return return:no FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_feature_use",
    "source_code": "def set_feature_use(feature: str, usage: bool):\n    if get_metrics_context().in_progress():\n        get_metrics_context().set_key_value('feature_usage', feature, usage)",
    "docstring": "Records whether we are using a feature Generally a feature is a JK.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:set_feature_use arg:feature arg:usage arguments arg arg If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, reflection_axis, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorHouseholder'):\n    parameters = dict(reflection_axis=reflection_axis, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    with ops.name_scope(name, values=[reflection_axis]):\n        self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(reflection_axis, name='reflection_axis')\n        self._check_reflection_axis(self._reflection_axis)\n        if is_self_adjoint is False:\n            raise ValueError('A Householder operator is always self adjoint.')\n        else:\n            is_self_adjoint = True\n        if is_positive_definite is True:\n            raise ValueError('A Householder operator is always non-positive definite.')\n        else:\n            is_positive_definite = False\n        if is_square is False:\n            raise ValueError('A Householder operator is always square.')\n        is_square = True\n        super(LinearOperatorHouseholder, self).__init__(dtype=self._reflection_axis.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . Args: reflection_axis: Shape with . The vector defining the hyperplane to reflect about. Allowed dtypes: , , , , . is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This is autoset to true is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: This is autoset to false. is_square: Expect that this operator acts like square [batch] matrices. This is autoset to true. name: A name for this . Raises: ValueError: is not , is not or is not .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_householder.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:reflection_axis arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg Assign Call With Call Assign Call Call If Compare Raise Call Assign If Compare Raise Call Assign If Compare Raise Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register_binary_elementwise_assert_api",
    "source_code": "def register_binary_elementwise_assert_api(func):\n    _BINARY_ELEMENTWISE_ASSERT_APIS.append(func)\n    for args, handler in _ELEMENTWISE_API_HANDLERS.items():\n        if len(args) == 3 and args[2] is _ASSERT_API_TAG:\n            _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler)\n    return func",
    "docstring": "Decorator that registers a TensorFlow op as a binary elementwise assert API. Different from , this decorator is used for assert apis, such as assert_equal, assert_none_equal, etc, which return None in eager mode and an op in graph mode. Args: func: The function that implements the binary elementwise assert API. Returns:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:register_binary_elementwise_assert_api arg:func arguments arg Call For Call If BoolOp Compare Call Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "CloughTocherInterpolatorValues",
    "source_code": "class CloughTocherInterpolatorValues(interpolate.CloughTocher2DInterpolator):\n\n    def __init__(self, points, xi, tol=1e-06, maxiter=400, **kwargs):\n        interpolate.CloughTocher2DInterpolator.__init__(self, points, None, tol=tol, maxiter=maxiter)\n        self.xi = None\n        self._preprocess_xi(*xi)\n\n    def _preprocess_xi(self, *args):\n        if self.xi is None:\n            self.xi, self.interpolation_points_shape = interpolate.CloughTocher2DInterpolator._preprocess_xi(self, *args)\n        return (self.xi, self.interpolation_points_shape)\n\n    def __call__(self, values):\n        self._set_values(values)\n        return super().__call__(self.xi)",
    "docstring": "Subclass of the CT2DInterpolator with optional . This is mainly a demo of the functionality. See for discussion",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\interpolate.py",
    "ast_data": "ClassDef name:CloughTocherInterpolatorValues FunctionDef name:__init__ arg:self arg:points arg:xi arg:tol arg:maxiter arguments arg arg arg arg arg arg Call Assign Call FunctionDef name:_preprocess_xi arg:self arguments arg arg If Compare Assign Call Return return:yes FunctionDef name:__call__ arg:self arg:values arguments arg arg Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "as_str_any",
    "source_code": "@tf_export('compat.as_str_any')\ndef as_str_any(value, encoding='utf-8'):\n    if isinstance(value, bytes):\n        return as_str(value, encoding=encoding)\n    else:\n        return str(value)",
    "docstring": "Converts input to type. Uses , except for typed inputs, which are converted using . Args: value: A object that can be converted to . encoding: Encoding for typed inputs. Returns: A object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py",
    "ast_data": "FunctionDef name:as_str_any arg:value arg:encoding arguments arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "all_to_all_single_autograd",
    "source_code": "def all_to_all_single_autograd(self: torch.Tensor, output_split_sizes: Optional[list[int]], input_split_sizes: Optional[list[int]], group: RANK_TYPES, tag: str='') -> torch.Tensor:\n    if output_split_sizes is not None:\n        assert all((isinstance(size, (int, torch.SymInt)) for size in output_split_sizes)), output_split_sizes\n    if input_split_sizes is not None:\n        assert all((isinstance(size, (int, torch.SymInt)) for size in input_split_sizes)), input_split_sizes\n    group_name = _resolve_group_name(group, tag)\n    group_size = c10d._get_group_size_by_name(group_name)\n    if output_split_sizes is None or input_split_sizes is None:\n        assert output_split_sizes is None and input_split_sizes is None, 'output_split_sizes and input_split_sizes must either be specified together or both set to None'\n        output_split_sizes = [self.shape[0] // group_size] * group_size\n        input_split_sizes = output_split_sizes\n    tensor = torch.ops._c10d_functional_autograd.all_to_all_single(self, output_split_sizes, input_split_sizes, group_name)\n    return _FromTorchTensor.apply(tensor)",
    "docstring": "Same as all_to_all_single but supports autograd.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:all_to_all_single_autograd arg:self arg:output_split_sizes arg:input_split_sizes arg:group arg:tag arguments arg arg arg arg arg If Compare Call Call If Compare Call Call Assign Call Assign Call If BoolOp Compare Compare BoolOp Compare Compare Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_display",
    "source_code": "def _display(self, pf: PythonFileT, results: list[LintResult]) -> Iterator[str]:\n    for r in results:\n        if self.args.lintrunner:\n            msg = r.as_message(code=self.code, path=str(pf.path))\n            yield json.dumps(msg.asdict(), sort_keys=True)\n        else:\n            if self.result_shown:\n                yield ''\n            else:\n                self.result_shown = True\n            if r.line is None:\n                yield f'{pf.path}: {r.name}'\n            else:\n                yield from (i.rstrip() for i in self._display_window(pf, r))",
    "docstring": "Emit a series of human-readable strings representing the results",
    "type": "method",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "FunctionDef name:_display arg:self arg:pf arg:results arguments arg arg arg For If Assign Call Call Call Call If Assign If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "_node_metadata_hook",
    "source_code": "def _node_metadata_hook(node: torch.fx.Node, stack_trace: Optional[str]=None) -> None:\n    assert node.op == 'call_function' and callable(node.target)\n    arg_meta = [arg.meta for arg in node.args if isinstance(arg, torch.fx.Node)]\n    assert len(arg_meta) >= 1\n    arg_meta = arg_meta[0]\n    if isinstance(node.target, torch._ops.OpOverload) and len(node.target._schema.returns) == 0:\n        node.meta['val'] = None\n    else:\n        fake_args = [arg.meta['val'] if isinstance(arg, torch.fx.Node) else arg for arg in node.args]\n        fake_res = node.target(*fake_args)\n        node.meta['val'] = fake_res\n    node.meta['stack_trace'] = stack_trace\n    node.meta['nn_module_stack'] = arg_meta.get('nn_module_stack', {_EMPTY_NN_MODULE_STACK_KEY: (_EMPTY_NN_MODULE_STACK_KEY, _EMPTY_NN_MODULE_STACK_KEY)})\n    node.meta['torch_fn'] = (f'{node.target.__name__}_0', f'{node.target.__class__.__name__}.{node.target.__name__}')",
    "docstring": "Hook for adding the appropriate metadata to nodes that are created during a pass using graph.create_node. An example of how to use it: This hook should not work for all generic cases -- specifically it assumes that nodes being added are only call_function nodes, and copies over the first argument node's nn_module_stack.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\_node_metadata_hook.py",
    "ast_data": "FunctionDef name:_node_metadata_hook arg:node arg:stack_trace arguments arg arg BoolOp Compare Call Assign Call Compare Call Assign If BoolOp Call Compare Call Assign Assign Call Assign Call Assign Assign Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_tick_info",
    "source_code": "def _get_tick_info(self, tick_iter):\n    ticks_loc_angle = []\n    ticklabels_loc_angle_label = []\n    ticklabel_add_angle = self._ticklabel_add_angle\n    for loc, angle_normal, angle_tangent, label in tick_iter:\n        angle_label = angle_tangent - 90 + ticklabel_add_angle\n        angle_tick = angle_normal if 90 <= (angle_label - angle_normal) % 360 <= 270 else angle_normal + 180\n        ticks_loc_angle.append([loc, angle_tick])\n        ticklabels_loc_angle_label.append([loc, angle_label, label])\n    return (ticks_loc_angle, ticklabels_loc_angle_label)",
    "docstring": "Return a pair of: - list of locs and angles for ticks - list of locs, angles and labels for ticklabels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py",
    "ast_data": "FunctionDef name:_get_tick_info arg:self arg:tick_iter arguments arg arg Assign Assign Assign For Assign Assign Compare Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_cdf",
    "source_code": "def _cdf(self, x, beta, m):\n    N = 1.0 / (m / beta / (m - 1) * np.exp(-beta ** 2 / 2.0) + _norm_pdf_C * _norm_cdf(beta))\n\n    def rhs(x, beta, m):\n        return m / beta * np.exp(-beta ** 2 / 2.0) / (m - 1) + _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta))\n\n    def lhs(x, beta, m):\n        return (m / beta) ** m * np.exp(-beta ** 2 / 2.0) * (m / beta - beta - x) ** (-m + 1) / (m - 1)\n    return N * xpx.apply_where(x > -beta, (x, beta, m), rhs, lhs)",
    "docstring": "Return CDF of the crystalball function",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_cdf arg:self arg:x arg:beta arg:m arguments arg arg arg arg Assign Call Call FunctionDef name:rhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes Call Call Call FunctionDef name:lhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "maybe_set_lowering_attr",
    "source_code": "def maybe_set_lowering_attr(op, lower_using_switch_merge=None):\n    if lower_using_switch_merge is not None:\n        op._set_attr('_lower_using_switch_merge', attr_value_pb2.AttrValue(b=lower_using_switch_merge))\n    elif not _DISABLE_LOWER_USING_SWITCH_MERGE and (not control_flow_util.GraphOrParentsInXlaContext(op.graph)) and (context.context().function_call_options.executor_type != 'SINGLE_THREADED_EXECUTOR'):\n        op._set_attr('_lower_using_switch_merge', attr_value_pb2.AttrValue(b=True))",
    "docstring": "Sets the flag to enable lowering on if necessary. Lowering allows cond_v2 and while_v2 to avoid some of the limitations of Functions, allowing users to specify devices & colocation inside of cond_v2 and while_v2 input functions, and enabling non-strict evaluation & partial pruning. This brings v2 control flow closer to feature parity with v1 control flow. However, we do not lower in the following cases: - When the or ops are in the XLA context. Because it is easier for XLA to apply its own optimizations when dealing with un-lowered control flow operators than with low-level control flow primitives. - When the eager execution context specifies the executor of functions to be the single threaded executor (see context.function_executor_type()). Because the single threaded executor does not support v1 control flow ops. - When 'lower_using_switch_merge' is explicitly set to False. Args: op: An or Operation. lower_using_switch_merge: Explicit value to lower or not (optional).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:maybe_set_lowering_attr arg:op arg:lower_using_switch_merge arguments arg arg If Compare Call Call If BoolOp Call Compare Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "split",
    "source_code": "def split(self, X, y=None, groups=None):\n    if groups is not None:\n        warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning)\n    return self._split(X)",
    "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Always ignored, exists for compatibility. groups : array-like of shape (n_samples,) Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:split arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "value",
    "source_code": "@property\ndef value(self) -> float:\n    return self._value",
    "docstring": "The binary representation of the missing value. Returns ------- {int, float} The binary representation of the missing value.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "WithEffects",
    "source_code": "class WithEffects(HigherOrderOperator):\n\n    def __init__(self) -> None:\n        super().__init__('with_effects')\n\n    def __call__(self, token, op: OpType, *args: tuple[Any, ...], **kwargs: dict[str, Any]) -> tuple[Any, ...]:\n        assert isinstance(op, (torch._ops.HigherOrderOperator, torch._ops.OpOverload))\n        assert not has_aliasing(op), 'Ops with aliasing is not supported'\n        assert has_effects(op, args, kwargs)\n        assert isinstance(kwargs, dict)\n        return super().__call__(token, op, *args, **kwargs)",
    "docstring": "with_effects(token, op, args, kwargs) -> (new_token, op_results) This HOP helps ensure ordering between side effectful ops like prints or ops using torchbind objects. This is needed to ensure a traced graph from AOTAutograd is functional so that future optimization passes do not reorder these operators. This is done through threading \"effect tokens\" through the graph to enforce data dependence between side effectful ops. The tokens are basically dummy values (torch.tensor([])). We create a token per \"effect type\", which are enumerated in the _EffectType enum.",
    "type": "class",
    "file_path": "pytorch\\torch\\_higher_order_ops\\effects.py",
    "ast_data": "ClassDef name:WithEffects FunctionDef name:__init__ arg:self arguments arg Call Call FunctionDef name:__call__ arg:self arg:token arg:op arguments arg arg arg arg arg Call Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_device",
    "source_code": "@classmethod\ndef from_device(cls, device: torch.device) -> '_FSDPDeviceHandle':\n    if device.type == 'cuda':\n        return cast(_FSDPDeviceHandle, torch.cuda)\n    elif device.type == 'mtia':\n        return cast(_FSDPDeviceHandle, torch.mtia)\n    return cls(device)",
    "docstring": "Return a device handle corresponding to the device, and through this handle, operations with the same semantics as CUDA can be performed on the device. Just return torch.cuda if the device is cuda to make attribute-access faster. Custom backend must first register a module with the same name with {device.type} on torch.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_common_utils.py",
    "ast_data": "FunctionDef name:from_device arg:cls arg:device arguments arg arg If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_dir",
    "source_code": "def _get_dir(self, dirs: list[str]) -> 'Directory':\n    if len(dirs) == 0:\n        return self\n    dir_name = dirs[0]\n    if dir_name not in self.children:\n        self.children[dir_name] = Directory(dir_name, True)\n    return self.children[dir_name]._get_dir(dirs[1:])",
    "docstring": "Builds path of Directories if not yet built and returns last directory in list. Args: dirs (List[str]): List of directory names that are treated like a path. Returns: :class:: The last Directory specified in the dirs list.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\file_structure_representation.py",
    "ast_data": "FunctionDef name:_get_dir arg:self arg:dirs arguments arg arg If Compare Call Return return:yes Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "use_overline",
    "source_code": "def use_overline(self, use_overline):\n    self._use_overline = use_overline",
    "docstring": "Switch display mode with overline for labelling p>1/2. Parameters ---------- use_overline : bool If x > 1/2, with x = 1 - v, indicate if x should be displayed as $\\overline{v}$. The default is to display $1 - v$.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:use_overline arg:self arg:use_overline arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "_all_saveable_objects",
    "source_code": "def _all_saveable_objects(scope=None):\n    return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) + ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope)",
    "docstring": "Returns all variables and s that must be checkpointed. Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied. The choice of means that a without special tokens filters by prefix. Returns: A list of and to be checkpointed",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:_all_saveable_objects arg:scope arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ExtremeFinderSimple",
    "source_code": "class ExtremeFinderSimple:\n\n    def __init__(self, nx, ny):\n        self.nx = nx\n        self.ny = ny\n\n    def __call__(self, transform_xy, x1, y1, x2, y2):\n        tbbox = self._find_transformed_bbox(_User2DTransform(transform_xy, None), Bbox.from_extents(x1, y1, x2, y2))\n        return (tbbox.x0, tbbox.x1, tbbox.y0, tbbox.y1)\n\n    def _find_transformed_bbox(self, trans, bbox):\n        grid = np.reshape(np.meshgrid(np.linspace(bbox.x0, bbox.x1, self.nx), np.linspace(bbox.y0, bbox.y1, self.ny)), (2, -1)).T\n        tbbox = Bbox.null()\n        tbbox.update_from_data_xy(trans.transform(grid))\n        return tbbox.expanded(1 + 2 / self.nx, 1 + 2 / self.ny)",
    "docstring": "A helper class to figure out the range of grid lines that need to be drawn.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py",
    "ast_data": "ClassDef name:ExtremeFinderSimple FunctionDef name:__init__ arg:self arg:nx arg:ny arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:transform_xy arg:x1 arg:y1 arg:x2 arg:y2 arguments arg arg arg arg arg arg Assign Call Call Call Return return:yes FunctionDef name:_find_transformed_bbox arg:self arg:trans arg:bbox arguments arg arg arg Assign Call Call Call Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_export_to_saved_model_graph",
    "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    first_var = self._vars[0]\n    resource_list = first_var._export_to_saved_model_graph(object_map, tensor_map, options, **kwargs)\n    for v in self._vars[1:]:\n        object_map[v] = object_map[first_var]\n        tensor_map[v.handle] = tensor_map[first_var.handle]\n        resource_list.append(v.handle)\n    object_map[self] = object_map[first_var]\n    tensor_map[self] = tensor_map[first_var.handle]\n    resource_list.append(self)\n    return resource_list",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign Assign Call For Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_set_ext_ring",
    "source_code": "def _set_ext_ring(self, ring):\n    self[0] = ring",
    "docstring": "Set the exterior ring of the Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:_set_ext_ring arg:self arg:ring arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "_repr_data_resource_",
    "source_code": "@final\ndef _repr_data_resource_(self):\n    if config.get_option('display.html.table_schema'):\n        data = self.head(config.get_option('display.max_rows'))\n        as_json = data.to_json(orient='table')\n        as_json = cast(str, as_json)\n        return loads(as_json, object_pairs_hook=collections.OrderedDict)",
    "docstring": "Not a real Jupyter special repr method, but we use the same naming convention.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_repr_data_resource_ arg:self arguments arg If Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_signature_for_torch_op",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef get_signature_for_torch_op(op: Callable, return_schemas: bool=False):\n    if isinstance(op, OpOverload):\n        schemas = [op._schema]\n    elif isinstance(op, OpOverloadPacket):\n        schemas = [getattr(op, overload)._schema for overload in op.overloads()]\n    else:\n        override = _manual_overrides.get(op)\n        if override:\n            return (override, None) if return_schemas else None\n        aten_fn = torch.jit._builtins._find_builtin(op)\n        if aten_fn is None:\n            return (None, None) if return_schemas else None\n        schemas = torch._C._jit_get_schemas_for_operator(aten_fn)\n    signatures = [_torchscript_schema_to_signature(schema) for schema in schemas]\n    return (signatures, schemas) if return_schemas else signatures",
    "docstring": "Given an operator on the namespace, return a list of objects corresponding to the overloads of that op.. May return if a signature could not be retrieved. Args: op (Callable): An operator on the namespace to look up a signature for Returns: Optional[List[inspect.Signature]]: A list of signatures for the overloads of this operator, or None if the operator signatures could not be retrieved. If return_schemas=True, returns a tuple containing the optional Python signatures and the optional TorchScript Function signature",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\operator_schemas.py",
    "ast_data": "FunctionDef name:get_signature_for_torch_op arg:op arg:return_schemas arguments arg arg If Call Assign If Call Assign Call Call Assign Call If Return return:yes Assign Call If Compare Return return:yes Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "write",
    "source_code": "def write(self, data):\n    if self.compressobj is None:\n        self.file.write(data)\n    else:\n        compressed = self.compressobj.compress(data)\n        self.file.write(compressed)",
    "docstring": "Write some data on the stream.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:write arg:self arg:data arguments arg arg If Compare Call Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "reset_index",
    "source_code": "def reset_index(self, level: IndexLabel | None=None, *, drop: bool=False, name: Level=lib.no_default, inplace: bool=False, allow_duplicates: bool=False) -> DataFrame | Series | None:\n    inplace = validate_bool_kwarg(inplace, 'inplace')\n    if drop:\n        new_index = default_index(len(self))\n        if level is not None:\n            level_list: Sequence[Hashable]\n            if not isinstance(level, (tuple, list)):\n                level_list = [level]\n            else:\n                level_list = level\n            level_list = [self.index._get_level_number(lev) for lev in level_list]\n            if len(level_list) < self.index.nlevels:\n                new_index = self.index.droplevel(level_list)\n        if inplace:\n            self.index = new_index\n        else:\n            new_ser = self.copy(deep=False)\n            new_ser.index = new_index\n            return new_ser.__finalize__(self, method='reset_index')\n    elif inplace:\n        raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame')\n    else:\n        if name is lib.no_default:\n            if self.name is None:\n                name = 0\n            else:\n                name = self.name\n        df = self.to_frame(name)\n        return df.reset_index(level=level, drop=drop, allow_duplicates=allow_duplicates)\n    return None",
    "docstring": "Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses `dropdropdropSeriesnamedroplevellevellevel` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:reset_index arg:self arg:level arguments arg arg arg arg arg arg Assign Call If Assign Call Call If Compare If Call Assign Assign Assign Call If Compare Call Assign Call If Assign Assign Call Assign Return return:yes Call If Raise Call If Compare If Compare Assign Assign Assign Call Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_null_coalesce_accumulate",
    "source_code": "def _null_coalesce_accumulate(lhs, rhs):\n    if lhs is None:\n        return rhs\n    elif rhs is None:\n        return lhs\n    else:\n        return torch.add(lhs, rhs)",
    "docstring": "Coalesce two values, even if one of them is null, returning the non-null value.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_backward.py",
    "ast_data": "FunctionDef name:_null_coalesce_accumulate arg:lhs arg:rhs arguments arg arg If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxI18nReader",
    "source_code": "class SphinxI18nReader(SphinxBaseReader):\n\n    def setup(self, app: Sphinx) -> None:\n        super().setup(app)\n        self.transforms = self.transforms + app.registry.get_transforms()\n        unused = [PreserveTranslatableMessages, Locale, RemoveTranslatableInline, AutoIndexUpgrader, SphinxDomains, DoctreeReadEvent, UIDTransform]\n        for transform in unused:\n            if transform in self.transforms:\n                self.transforms.remove(transform)",
    "docstring": "A document reader for i18n. This returns the source line number of original text as current source line number to let users know where the error happened. Because the translated texts are partial and they don't have correct line numbers.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\io.py",
    "ast_data": "ClassDef name:SphinxI18nReader FunctionDef name:setup arg:self arg:app arguments arg arg Call Call Assign Call Assign For If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "PrefetchBenchmark",
    "source_code": "class PrefetchBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def benchmark_prefetch(self):\n        num_elements = 1000000\n        for prefetch_buffer in [1, 5, 10, 20, 100]:\n            dataset = dataset_ops.Dataset.range(num_elements)\n            dataset = dataset.prefetch(prefetch_buffer)\n            self.run_and_report_benchmark(dataset, num_elements=num_elements, extras={'model_name': 'prefetch.benchmark.1', 'parameters': '%d' % prefetch_buffer}, name='prefetch_{}'.format(prefetch_buffer))",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\prefetch_benchmark.py",
    "ast_data": "ClassDef name:PrefetchBenchmark FunctionDef name:benchmark_prefetch arg:self arguments arg Assign For Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_diag",
    "source_code": "def _check_diag(self, diag):\n    if diag.shape.ndims is not None and diag.shape.ndims < 1:\n        raise ValueError('Argument diag must have at least 1 dimension.  Found: %s' % diag)",
    "docstring": "Static check of diag.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_diag.py",
    "ast_data": "FunctionDef name:_check_diag arg:self arg:diag arguments arg arg If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "uvicorn",
    "name": "process_request",
    "source_code": "async def process_request(self, path: str, request_headers: Headers) -> HTTPResponse | None:\n    path_portion, _, query_string = path.partition('?')\n    websockets.legacy.handshake.check_request(request_headers)\n    subprotocols: list[str] = []\n    for header in request_headers.get_all('Sec-WebSocket-Protocol'):\n        subprotocols.extend([token.strip() for token in header.split(',')])\n    asgi_headers = [(name.encode('ascii'), value.encode('ascii', errors='surrogateescape')) for name, value in request_headers.raw_items()]\n    path = unquote(path_portion)\n    full_path = self.root_path + path\n    full_raw_path = self.root_path.encode('ascii') + path_portion.encode('ascii')\n    self.scope = {'type': 'websocket', 'asgi': {'version': self.config.asgi_version, 'spec_version': '2.4'}, 'http_version': '1.1', 'scheme': self.scheme, 'server': self.server, 'client': self.client, 'root_path': self.root_path, 'path': full_path, 'raw_path': full_raw_path, 'query_string': query_string.encode('ascii'), 'headers': asgi_headers, 'subprotocols': subprotocols, 'state': self.app_state.copy(), 'extensions': {'websocket.http.response': {}}}\n    task = self.loop.create_task(self.run_asgi())\n    task.add_done_callback(self.on_task_complete)\n    self.tasks.add(task)\n    await self.handshake_started_event.wait()\n    return self.initial_response",
    "docstring": "This hook is called to determine if the websocket should return an HTTP response and close. Our behavior here is to start the ASGI application, and then wait for either or in order to determine if we should close the connection.",
    "type": "method",
    "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\websockets_impl.py",
    "ast_data": "AsyncFunctionDef name:process_request arg:self arg:path arg:request_headers arguments arg arg arg Assign Call Call For Call Call Call Call Assign Call Call Call Assign Call Assign Assign Call Call Assign Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "add_guard",
    "source_code": "def add_guard(self, name, normal_define):\n    wrap = textwrap.dedent(f'\\n            #if NPY_FEATURE_VERSION >= {self.version}\\n            {{define}}\\n            #endif')\n    return wrap.format(define=normal_define)",
    "docstring": "Wrap a definition behind a version guard",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py",
    "ast_data": "FunctionDef name:add_guard arg:self arg:name arg:normal_define arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "same_dynamic_shape",
    "source_code": "def same_dynamic_shape(a, b):\n    a = ops.convert_to_tensor(a, name='a')\n    b = ops.convert_to_tensor(b, name='b')\n\n    def all_shapes_equal():\n        return math_ops.reduce_all(math_ops.equal(array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0), array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))\n    return tf_cond.cond(math_ops.equal(array_ops.rank(a), array_ops.rank(b)), all_shapes_equal, lambda: constant_op.constant(False))",
    "docstring": "Returns whether a and b have the same dynamic shape. Args: a: b: Returns: representing if both tensors have the same shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py",
    "ast_data": "FunctionDef name:same_dynamic_shape arg:a arg:b arguments arg arg Assign Call Assign Call FunctionDef name:all_shapes_equal arguments Return return:yes Call Call Call Call Call Call Call Call Return return:yes Call Call Call Call arguments Call"
  },
  {
    "library": "pandas",
    "name": "_get_no_sort_one_missing_indexer",
    "source_code": "def _get_no_sort_one_missing_indexer(n: int, left_missing: bool) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:\n    idx = np.arange(n, dtype=np.intp)\n    idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp)\n    if left_missing:\n        return (idx_missing, idx)\n    return (idx, idx_missing)",
    "docstring": "Return join indexers where all of one side is selected without sorting and none of the other side is selected. Parameters ---------- n : int Length of indexers to create. left_missing : bool If True, the left indexer will contain only -1's. If False, the right indexer will contain only -1's. Returns ------- np.ndarray[np.intp] Left indexer np.ndarray[np.intp] Right indexer",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_get_no_sort_one_missing_indexer arg:n arg:left_missing arguments arg arg Assign Call Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "from_custom_template",
    "source_code": "@classmethod\ndef from_custom_template(cls, searchpath: Sequence[str], html_table: str | None=None, html_style: str | None=None) -> type[Styler]:\n    loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader])\n\n    class MyStyler(cls):\n        env = jinja2.Environment(loader=loader)\n        if html_table:\n            template_html_table = env.get_template(html_table)\n        if html_style:\n            template_html_style = env.get_template(html_style)\n    return MyStyler",
    "docstring": "Factory function for creating a subclass of `Table Visualization `_ for more examples.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:from_custom_template arg:cls arg:searchpath arg:html_table arg:html_style arguments arg arg arg arg Assign Call Call ClassDef name:MyStyler Assign Call If Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, armA=0.0, armB=0.0, fraction=0.3, angle=None):\n    self.armA = armA\n    self.armB = armB\n    self.fraction = fraction\n    self.angle = angle",
    "docstring": "Parameters ---------- armA : float Minimum length of armA. armB : float Minimum length of armB. fraction : float A fraction of the distance between two points that will be added to armA and armB. angle : float or None Angle of the connecting line (if None, parallel to A and B).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:armA arg:armB arg:fraction arg:angle arguments arg arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_adjustable",
    "source_code": "def get_adjustable(self):\n    return self._adjustable",
    "docstring": "Return whether the Axes will adjust its physical dimension ('box') or its data limits ('datalim') to achieve the desired aspect ratio. See Also -------- matplotlib.axes.Axes.set_adjustable Set how the Axes adjusts to achieve the required aspect ratio. matplotlib.axes.Axes.set_aspect For a description of aspect handling.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_adjustable arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_grant_types",
    "source_code": "def validate_grant_types(self):\n    self._validate_claim_value('grant_types')",
    "docstring": "Array of OAuth 2.0 grant type strings that the client can use at the token endpoint.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_grant_types arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_start_workers",
    "source_code": "@abc.abstractmethod\ndef _start_workers(self, worker_group: WorkerGroup) -> dict[int, Any]:\n    raise NotImplementedError",
    "docstring": "Start ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py",
    "ast_data": "FunctionDef name:_start_workers arg:self arg:worker_group arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "to_sharded_dtensor",
    "source_code": "def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor:\n    if tensor.shape != self.sharded_size:\n        _raise_assert_with_print(f'Expects size {self.sharded_size} but got {tensor.shape}')\n    return _from_local_no_grad(tensor, self._sharding_spec)",
    "docstring": "Converts a local tensor representing either the sharded parameter or sharded gradient to DTensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param.py",
    "ast_data": "FunctionDef name:to_sharded_dtensor arg:self arg:tensor arguments arg arg If Compare Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "to_tensor",
    "source_code": "def to_tensor(self, as_padded_sequence: bool=False) -> Union[Tensor, List[Tensor]]:\n    if as_padded_sequence:\n        raise NotImplementedError\n    return self._data",
    "docstring": "Cast :class: to a tensor. `(B, N, 3)`",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:to_tensor arg:self arg:as_padded_sequence arguments arg arg If Raise Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "can_create",
    "source_code": "@classmethod\ndef can_create(cls, interpreter):\n    if not cls.can_describe(interpreter):\n        return None\n    meta = cls.setup_meta(interpreter)\n    if meta is not None and meta:\n        cls._sources_can_be_applied(interpreter, meta)\n    return meta",
    "docstring": "By default, all built-in methods assume that if we can describe it we can create it.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\via_global_self_do.py",
    "ast_data": "FunctionDef name:can_create arg:cls arg:interpreter arguments arg arg If Call Return return:no Assign Call If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_dynamic_ragged_shape_from_tensor",
    "source_code": "def _dynamic_ragged_shape_from_tensor(field, dtype=None) -> dynamic_ragged_shape.DynamicRaggedShape:\n    if isinstance(field, StructuredTensor):\n        return field._ragged_shape\n    shape = array_ops.shape_v2(field, out_type=dtype)\n    if isinstance(shape, tensor.Tensor):\n        return dynamic_ragged_shape.DynamicRaggedShape(row_partitions=[], inner_shape=shape)\n    elif isinstance(shape, dynamic_ragged_shape.DynamicRaggedShape):\n        return shape\n    raise TypeError(f'Expected shape tf.shape({field}) to return a Tensor or a DynamicRaggedShape. Instead, got: {shape}.')",
    "docstring": "Extension of DynamicRaggedShape.from_tensor to support StructuredTensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_dynamic_ragged_shape_from_tensor arg:field arg:dtype arguments arg arg If Call Return return:yes Assign Call If Call Return return:yes Call If Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_LoadStatus",
    "source_code": "class _LoadStatus:\n\n    @abc.abstractmethod\n    def assert_consumed(self):\n        pass\n\n    @abc.abstractmethod\n    def assert_existing_objects_matched(self):\n        pass\n\n    @abc.abstractmethod\n    def assert_nontrivial_match(self):\n        pass\n\n    @abc.abstractmethod\n    def run_restore_ops(self, session=None):\n        pass\n\n    @abc.abstractmethod\n    def initialize_or_restore(self, session=None):\n        pass\n\n    def expect_partial(self):\n        return self",
    "docstring": "Abstract base for load status callbacks.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "ClassDef name:_LoadStatus FunctionDef name:assert_consumed arg:self arguments arg FunctionDef name:assert_existing_objects_matched arg:self arguments arg FunctionDef name:assert_nontrivial_match arg:self arguments arg FunctionDef name:run_restore_ops arg:self arg:session arguments arg arg FunctionDef name:initialize_or_restore arg:self arg:session arguments arg arg FunctionDef name:expect_partial arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    return self._element_spec",
    "docstring": "The type specification of an element of this iterator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_user_input_list",
    "source_code": "def get_user_input_list(self, split_node: torch.fx.Node, next_users: list[torch.fx.Node]) -> list[list[Union[torch.fx.Node, _Range]]]:\n    user_inputs_list: list[list[Union[torch.fx.Node, _Range]]] = []\n    for user in next_users:\n        if user.target in (torch.cat, torch.stack):\n            user_inputs_list.append(self.get_merged_user_inputs(split_node, user))\n        else:\n            user_inputs_list.append(self.get_non_cat_node_input(split_node, user))\n    return user_inputs_list",
    "docstring": "Returns list of inputs to the following user nodes, in order. The outer list represents the user node. The inner list represents the inputs to that particular node. This list can either contain - a tuple representing the ranges of get_items that should go into the cat (closed interval) - torch.fx.Node representing \"other\" inputs (which are not coming from our split)",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py",
    "ast_data": "FunctionDef name:get_user_input_list arg:self arg:split_node arg:next_users arguments arg arg arg For If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_getfield_is_safe",
    "source_code": "def _getfield_is_safe(oldtype, newtype, offset):\n    if newtype.hasobject or oldtype.hasobject:\n        if offset == 0 and newtype == oldtype:\n            return\n        if oldtype.names is not None:\n            for name in oldtype.names:\n                if oldtype.fields[name][1] == offset and oldtype.fields[name][0] == newtype:\n                    return\n        raise TypeError('Cannot get/set field of an object array')\n    return",
    "docstring": "Checks safety of getfield for object arrays. As in _view_is_safe, we need to check that memory containing objects is not reinterpreted as a non-object datatype and vice versa. Parameters ---------- oldtype : data-type Data type of the original ndarray. newtype : data-type Data type of the field being accessed by ndarray.getfield offset : int Offset of the field being accessed by ndarray.getfield Raises ------ TypeError If the field access is invalid",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:_getfield_is_safe arg:oldtype arg:newtype arg:offset arguments arg arg arg If BoolOp If BoolOp Compare Compare Return return:no If Compare For If BoolOp Compare Compare Return return:no Raise Call Return return:no"
  },
  {
    "library": "scikit-learn",
    "name": "set_output",
    "source_code": "def set_output(self, *, transform=None):\n    super().set_output(transform=transform)\n    for _, step, _ in self._iter():\n        _safe_set_output(step, transform=transform)\n    return self",
    "docstring": "Set the output container when and are called. will set the output of all estimators in . Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . - : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged Returns ------- self : estimator instance Estimator instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:set_output arg:self arguments arg arg Call Call For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_batch_norm_reserve_tensor",
    "source_code": "def _get_batch_norm_reserve_tensor(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, eps: float, training: bool) -> Tensor:\n    backend = torch._C._select_batch_norm_backend(input, weight, bias, running_mean, running_var, True, eps)\n    reserve_size = 0\n    if backend == torch._C._BatchNormBackend.Cudnn:\n        reserve_size = torch._C._get_cudnn_batch_norm_reserve_space_size(input, training)\n    return torch.empty(reserve_size, dtype=torch.uint8, layout=input.layout, device=input.device)",
    "docstring": "Return a reserve tensor for batch norm, used only by cudnn to pass forward state to the backward pass. This is needed for and , which support a variety of backends including cudnn. We create this tensor here to get the correct shape in the traced graph if we detect that will call the cudnn kernel, and rely on DCE to avoid materializing this tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\_decomp\\decompositions.py",
    "ast_data": "FunctionDef name:_get_batch_norm_reserve_tensor arg:input arg:weight arg:bias arg:running_mean arg:running_var arg:eps arg:training arguments arg arg arg arg arg arg arg Assign Call Assign If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "ExternalStream",
    "source_code": "class ExternalStream(Stream):\n\n    def __new__(cls, stream_ptr, device=None, **kwargs):\n        with torch.cuda.device(device):\n            return super().__new__(cls, stream_ptr=stream_ptr, **kwargs)",
    "docstring": "Wrapper around an externally allocated CUDA stream. This class is used to wrap streams allocated in other libraries in order to facilitate data exchange and multi-library interactions. .. note:: This class doesn't manage the stream life-cycle, it is the user responsibility to keep the referenced stream alive while this class is being used. Args: stream_ptr(int): Integer representation of the value. allocated externally. device(torch.device or int, optional): the device where the stream was originally allocated. If device is specified incorrectly, subsequent launches using this stream may fail.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "ClassDef name:ExternalStream FunctionDef name:__new__ arg:cls arg:stream_ptr arg:device arguments arg arg arg arg With Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "local_offsets",
    "source_code": "def local_offsets(self) -> list[torch.Size]:\n    return [chunk.offsets for chunk in self._storage_meta.chunks]",
    "docstring": "Returns a list of :class:`torch.Size' corresponding to the local offsets for the shards on this rank. Returns an empty list if the current rank does not host any shards for this Tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:local_offsets arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "vec",
    "source_code": "@property\ndef vec(self) -> Tensor:\n    return self.data[..., 1:]",
    "docstring": "Return the vector with the imaginary part with shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:vec arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__exit__",
    "source_code": "def __exit__(self, etype, evalue, etrace):\n    self.close()",
    "docstring": "Context manager exit method, closes the underlying file if it is open.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:etype arg:evalue arg:etrace arguments arg arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "_KNV0_loop",
    "source_code": "def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):\n    stop = False\n    nb_try = 0\n    while nb_try < maxiter and (not stop):\n        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))\n        for j in range(B.shape[0]):\n            _KNV0(B, ker_pole, transfer_matrix, j, poles)\n        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)), np.abs(np.linalg.det(transfer_matrix))))\n        cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) / det_transfer_matrix)\n        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):\n            stop = True\n        nb_try += 1\n    return (stop, cur_rtol, nb_try)",
    "docstring": "Loop over all poles one by one and apply KNV method 0 algorithm",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:_KNV0_loop arg:ker_pole arg:transfer_matrix arg:poles arg:B arg:maxiter arg:rtol arguments arg arg arg arg arg arg Assign Assign While BoolOp Compare Assign Call Call For Call Call Assign Call Call Call Call Call Assign Call If BoolOp Compare Compare Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_sys_info",
    "source_code": "def _get_sys_info():\n    python = sys.version.replace('\\n', ' ')\n    blob = [('python', python), ('executable', sys.executable), ('machine', platform.platform())]\n    return dict(blob)",
    "docstring": "System information Returns ------- sys_info : dict system and Python version information",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_show_versions.py",
    "ast_data": "FunctionDef name:_get_sys_info arguments Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "negate",
    "source_code": "def negate(self):\n    self.negated = not self.negated",
    "docstring": "Negate the sense of the root connector.",
    "type": "method",
    "file_path": "django\\django\\utils\\tree.py",
    "ast_data": "FunctionDef name:negate arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "new",
    "source_code": "@staticmethod\ndef new(node, function, enclosing_graph):\n    if node.op in ['VariableV2', 'VarHandleOp', 'Placeholder']:\n        return _VarHandle(node, function, enclosing_graph)\n    elif node.op == 'Case':\n        return _Case(node, function, enclosing_graph)\n    elif node.op == 'Merge':\n        return _Merge(node, function, enclosing_graph)\n    elif node.op == 'PartitionedCall':\n        return _PartitionedCall(node, function, enclosing_graph)\n    elif node.op == 'StatefulPartitionedCall':\n        return _PartitionedCall(node, function, enclosing_graph)\n    elif node.op == 'ReadVariableOp':\n        return _ReadVariable(node, function, enclosing_graph)\n    elif node.op == 'ResourceGather':\n        return _ResourceGather(node, function, enclosing_graph)\n    elif node.op == 'ResourceGatherNd':\n        return _ResourceGatherNd(node, function, enclosing_graph)\n    elif node.op in ['If', 'StatelessIf']:\n        return _If(node, function, enclosing_graph)\n    elif node.op in ['While', 'StatelessWhile']:\n        return _While(node, function, enclosing_graph)\n    elif node.op in ['Enter', 'Exit', 'Identity', 'NextIteration', 'Switch', '_SwitchN']:\n        return _Intermediate(node, function, enclosing_graph)\n    else:\n        return _Node(node, function, enclosing_graph)",
    "docstring": "Creates a new _Node base on its operation type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:new arg:node arg:function arg:enclosing_graph arguments arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, bitmap, dpi):\n    super().__init__()\n    _log.debug('%s - __init__()', type(self))\n    self.width = bitmap.GetWidth()\n    self.height = bitmap.GetHeight()\n    self.bitmap = bitmap\n    self.fontd = {}\n    self.dpi = dpi\n    self.gc = None",
    "docstring": "Initialise a wxWindows renderer instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bitmap arg:dpi arguments arg arg arg Call Call Call Call Assign Call Assign Call Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "get_all_fig_managers",
    "source_code": "@classmethod\ndef get_all_fig_managers(cls):\n    return list(cls.figs.values())",
    "docstring": "Return a list of figure managers.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py",
    "ast_data": "FunctionDef name:get_all_fig_managers arg:cls arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_ContainerTemplate",
    "source_code": "class _ContainerTemplate(ABC):\n\n    @abstractmethod\n    def get_next_element_by_instance(self, instance_id: int):\n        ...\n\n    @abstractmethod\n    def is_every_instance_exhausted(self) -> bool:\n        ...\n\n    @abstractmethod\n    def reset(self) -> None:\n        ...\n\n    @abstractmethod\n    def get_length_by_instance(self, instance_id: int):\n        pass",
    "docstring": "Abstract class for container ``. The followings are three required methods.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "ClassDef name:_ContainerTemplate FunctionDef name:get_next_element_by_instance arg:self arg:instance_id arguments arg arg FunctionDef name:is_every_instance_exhausted arg:self arguments arg FunctionDef name:reset arg:self arguments arg FunctionDef name:get_length_by_instance arg:self arg:instance_id arguments arg arg"
  },
  {
    "library": "django",
    "name": "PyMemcacheCache",
    "source_code": "class PyMemcacheCache(BaseMemcachedCache):\n\n    def __init__(self, server, params):\n        import pymemcache.serde\n        super().__init__(server, params, library=pymemcache, value_not_found_exception=KeyError)\n        self._class = self._lib.HashClient\n        self._options = {'allow_unicode_keys': True, 'default_noreply': False, 'serde': pymemcache.serde.pickle_serde, **self._options}",
    "docstring": "An implementation of a cache binding using pymemcache.",
    "type": "class",
    "file_path": "django\\django\\core\\cache\\backends\\memcached.py",
    "ast_data": "ClassDef name:PyMemcacheCache FunctionDef name:__init__ arg:self arg:server arg:params arguments arg arg arg Call Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "sparse_eye",
    "source_code": "@tf_export('sparse.eye')\ndef sparse_eye(num_rows, num_columns=None, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, default_name='eye', values=[num_rows, num_columns]):\n        num_rows = _make_int64_tensor(num_rows, 'num_rows')\n        num_columns = num_rows if num_columns is None else _make_int64_tensor(num_columns, 'num_columns')\n        diag_size = math_ops.minimum(num_rows, num_columns)\n        diag_range = math_ops.range(diag_size, dtype=dtypes.int64)\n        return sparse_tensor.SparseTensor(indices=array_ops_stack.stack([diag_range, diag_range], axis=1), values=array_ops.ones(diag_size, dtype=dtype), dense_shape=[num_rows, num_columns])",
    "docstring": "Creates a two-dimensional sparse tensor with ones along the diagonal. Args: num_rows: Non-negative integer or scalar giving the number of rows in the resulting matrix. num_columns: Optional non-negative integer or scalar giving the number of columns in the resulting matrix. Defaults to . dtype: The type of element in the resulting . name: A name for this . Defaults to \"eye\". Returns: A of shape [num_rows, num_columns] with ones along the diagonal.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py",
    "ast_data": "FunctionDef name:sparse_eye arg:num_rows arg:num_columns arg:dtype arg:name arguments arg arg arg arg With Call Assign Call Assign Compare Call Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, fetches):\n    self._fetch_type = type(fetches)\n    if isinstance(fetches, collections.defaultdict):\n        self._type_ctor = functools.partial(collections.defaultdict, fetches.default_factory)\n    else:\n        self._type_ctor = self._fetch_type\n    self._keys = fetches.keys()\n    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches.values()]\n    self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)",
    "docstring": "Creates a _DictFetchMapper. Args: fetches: Dict of fetches.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:fetches arguments arg arg Assign Call If Call Assign Call Assign Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_caller",
    "source_code": "def _get_caller(offset=3):\n    f = _sys._getframe(offset)\n    our_file = f.f_code.co_filename\n    f = f.f_back\n    while f:\n        code = f.f_code\n        if code.co_filename != our_file:\n            return (code, f)\n        f = f.f_back\n    return (None, None)",
    "docstring": "Returns a code and frame object for the lowest non-logging stack frame.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:_get_caller arg:offset arguments arg Assign Call Assign Assign While Assign If Compare Return return:yes Assign Return return:no"
  },
  {
    "library": "pytorch",
    "name": "set_growth_factor",
    "source_code": "def set_growth_factor(self, new_factor: float) -> None:\n    self._growth_factor = new_factor",
    "docstring": "Set a new scale growth factor. Args: new_scale (float): Value to use as the new scale growth factor.",
    "type": "method",
    "file_path": "pytorch\\torch\\amp\\grad_scaler.py",
    "ast_data": "FunctionDef name:set_growth_factor arg:self arg:new_factor arguments arg arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "take",
    "source_code": "def take(self, count, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import take_op\n    return take_op._take(self, count, name=name)",
    "docstring": "Creates a with at most elements from this dataset. >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.take(3) >>> [a.item() for a in dataset.as_numpy_iterator()] [0, 1, 2] Args: count: A scalar , representing the number of elements of this dataset that should be taken to form the new dataset. If is -1, or if is greater than the size of this dataset, the new dataset will contain all elements of this dataset. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:take arg:self arg:count arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "OutOfRangeError",
    "source_code": "@tf_export('errors.OutOfRangeError')\nclass OutOfRangeError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(OutOfRangeError, self).__init__(node_def, op, message, OUT_OF_RANGE, *args)",
    "docstring": "Raised when an operation iterates past the valid range. Unlike , this error indicates a problem may be fixed if the system state changes. For example, if a list grows and the operation is now within the valid range. overlaps with and should be preferred as the more specific error when iterating or accessing a range. For example, iterating a TF dataset past the last item in the dataset will raise this error.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:OutOfRangeError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, n, p):\n    eps = np.finfo(np.result_type(np.asarray(p), np.float32)).eps * 10\n    p = np.array(p, dtype=np.float64, copy=True)\n    p_adjusted = 1.0 - p[..., :-1].sum(axis=-1)\n    i_adjusted = np.abs(1 - p.sum(axis=-1)) > eps\n    p[i_adjusted, -1] = p_adjusted[i_adjusted]\n    if np.any(i_adjusted):\n        message = f'Some rows of `p` do not sum to 1.0 within tolerance of eps={eps!r}. Currently, the last element of these rows is adjusted to compensate, but this condition will produce NaNs beginning in SciPy 1.18.0. Please ensure that rows of `p` sum to 1.0 to avoid futher disruption.'\n        warnings.warn(message, FutureWarning, stacklevel=3)\n    pcond = np.any(p < 0, axis=-1)\n    pcond |= np.any(p > 1, axis=-1)\n    n = np.array(n, dtype=int, copy=True)\n    ncond = n < 0\n    return (n, p, ncond | pcond)",
    "docstring": "Returns: n_, p_, npcond. n_ and p_ are arrays of the correct shape; npcond is a boolean array flagging values out of the domain.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:n arg:p arguments arg arg arg Assign Call Call Call Assign Call Assign Call Assign Compare Call Call Assign If Call Assign Call Assign Call Compare Call Compare Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "open",
    "source_code": "def open(spider):\n    pass",
    "docstring": "Open the storage for the given spider. It must return a file-like object that will be used for the exporters",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:open arg:spider arguments arg"
  },
  {
    "library": "numpy",
    "name": "hermeval2d",
    "source_code": "def hermeval2d(x, y, c):\n    return pu._valnd(hermeval, c, x, y)",
    "docstring": "Evaluate a 2-D HermiteE series at points (x, y). This function returns the values: .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) The parameters and are converted to arrays only if they are tuples or a lists, otherwise they are treated as a scalars and they must have the same shape after conversion. In either case, either and or their elements must support multiplication and addition both with themselves and with the elements of . If is a 1-D array a one is implicitly appended to its shape to make it 2-D. The shape of the result will be c.shape[2:] + x.shape. Parameters ---------- x, y : array_like, compatible objects The two dimensional series is evaluated at the points `xyxycxy`. See Also -------- hermeval, hermegrid2d, hermeval3d, hermegrid3d",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "FunctionDef name:hermeval2d arg:x arg:y arg:c arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "first_path",
    "source_code": "def first_path(self, dst: str) -> list[str]:\n    path = []\n    while dst:\n        path.append(dst)\n        candidates = self._pred[dst].keys()\n        dst, min_idx = ('', None)\n        for candidate in candidates:\n            idx = self._node_order.get(candidate, None)\n            if idx is None:\n                break\n            if min_idx is None or idx < min_idx:\n                min_idx = idx\n                dst = candidate\n    return list(reversed(path))",
    "docstring": "Returns a list of nodes that show the first path that resulted in dst being added to the graph.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\_digraph.py",
    "ast_data": "FunctionDef name:first_path arg:self arg:dst arguments arg arg Assign While Call Assign Call Assign For Assign Call If Compare If BoolOp Compare Compare Assign Assign Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "allpath",
    "source_code": "def allpath(name):\n    split = name.split('/')\n    return os.path.join(*split)",
    "docstring": "Convert a /-separated pathname to one using the OS's path separator.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:allpath arg:name arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "GELU",
    "source_code": "class GELU(Module):\n    __constants__ = ['approximate']\n    approximate: str\n\n    def __init__(self, approximate: str='none') -> None:\n        super().__init__()\n        self.approximate = approximate\n\n    def forward(self, input: Tensor) -> Tensor:\n        return F.gelu(input, approximate=self.approximate)\n\n    def extra_repr(self) -> str:\n        return f'approximate={repr(self.approximate)}'",
    "docstring": "Applies the Gaussian Error Linear Units function. .. math:: \\text{GELU}(x) = x * \\Phi(x) where :math: is the Cumulative Distribution Function for Gaussian Distribution. When the approximate argument is 'tanh', Gelu is estimated with: .. math:: \\text{GELU}(x) = 0.5 * x * (1 + \\text{Tanh}(\\sqrt{2 / \\pi} * (x + 0.044715 * x^3))) Args: approximate (str, optional): the gelu approximation algorithm to use: `(*)*(*)`, same shape as the input. .. image:: ../scripts/activation_images/GELU.png Examples:: >>> m = nn.GELU() >>> input = torch.randn(2) >>> output = m(input)",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\activation.py",
    "ast_data": "ClassDef name:GELU Assign FunctionDef name:__init__ arg:self arg:approximate arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call FunctionDef name:extra_repr arg:self arguments arg Return return:yes Call"
  },
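  {
    "library": "pytorch",
    "name": "GELU (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: compares the exact erf-based GELU against the 'tanh' approximation described in the docstring. Assumes only a stock torch install.",
    "example_code": "import torch\nfrom torch import nn\n\nx = torch.linspace(-3.0, 3.0, steps=7)\nexact = nn.GELU()(x)                     # erf-based definition\napprox = nn.GELU(approximate='tanh')(x)  # tanh estimate from the docstring\nprint((exact - approx).abs().max())      # small, typically < 1e-2"
  },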
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, a, b, **kwargs):\n    if not a.is_affine or not b.is_affine:\n        raise ValueError(\"'a' and 'b' must be affine transforms\")\n    if a.output_dims != b.input_dims:\n        raise ValueError(\"The output dimension of 'a' must be equal to the input dimensions of 'b'\")\n    self.input_dims = a.input_dims\n    self.output_dims = b.output_dims\n    super().__init__(**kwargs)\n    self._a = a\n    self._b = b\n    self.set_children(a, b)\n    self._mtx = None",
    "docstring": "Create a new composite transform that is the result of applying *a* then *b*. You will generally not call this constructor directly but write `` instead, which will automatically choose the best kind of composite transform instance to create.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg arg If BoolOp Raise Call If Compare Raise Call Assign Assign Call Call Assign Assign Call Assign"
  },
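  {
    "library": "matplotlib",
    "name": "__init__ (composite transform usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: as the docstring says, ``a + b`` is the intended way to reach this constructor. Assumes only matplotlib and numpy are installed.",
    "example_code": "import numpy as np\nfrom matplotlib.transforms import Affine2D\n\n# 'a + b' applies a first, then b, via this constructor path\nt = Affine2D().scale(2.0) + Affine2D().translate(1.0, 0.0)\nprint(t.transform(np.array([[1.0, 1.0]])))  # [[3. 2.]]"
  },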
  {
    "library": "scrapy",
    "name": "_get_storage",
    "source_code": "def _get_storage(self, uri: str, feed_options: dict[str, Any]) -> FeedStorageProtocol:\n    cls = self.storages.get(urlparse(uri).scheme, self.storages['file'])\n    return build_from_crawler(cls, self.crawler, uri, feed_options=feed_options)",
    "docstring": "Build a storage object for the specified *uri* with the specified *feed_options*.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:_get_storage arg:self arg:uri arg:feed_options arguments arg arg arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "env",
    "source_code": "class env(Action):\n\n    def __init__(self, dest, default=None, required=False, **kwargs) -> None:\n        env_name = f'PET_{dest.upper()}'\n        default = os.environ.get(env_name, default)\n        if default:\n            required = False\n        super().__init__(dest=dest, default=default, required=required, **kwargs)\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        setattr(namespace, self.dest, values)",
    "docstring": "Get argument values from ``) Example: :: parser.add_argument(\"-f\", \"--foo\", action=env, default=\"bar\") ./program -> args.foo=\"bar\" ./program -f baz -> args.foo=\"baz\" ./program --foo baz -> args.foo=\"baz\" PET_FOO=\"env_bar\" ./program -f baz -> args.foo=\"baz\" PET_FOO=\"env_bar\" ./program --foo baz -> args.foo=\"baz\" PET_FOO=\"env_bar\" ./program -> args.foo=\"env_bar\" parser.add_argument(\"-f\", \"--foo\", action=env, required=True) ./program -> fails ./program -f baz -> args.foo=\"baz\" PET_FOO=\"env_bar\" ./program -> args.foo=\"env_bar\" PET_FOO=\"env_bar\" ./program -f baz -> args.foo=\"baz\"",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\argparse_util.py",
    "ast_data": "ClassDef name:env FunctionDef name:__init__ arg:self arg:dest arg:default arg:required arguments arg arg arg arg arg Assign Call Assign Call If Assign Call Call FunctionDef name:__call__ arg:self arg:parser arg:namespace arg:values arg:option_string arguments arg arg arg arg arg Call"
  },
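  {
    "library": "pytorch",
    "name": "env (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: the PET_{DEST} environment variable is read when add_argument() runs, so it must be set beforehand. Assumes torch is installed.",
    "example_code": "import os\nfrom argparse import ArgumentParser\n\nfrom torch.distributed.argparse_util import env\n\nos.environ['PET_FOO'] = 'env_bar'  # read at add_argument() time\nparser = ArgumentParser()\nparser.add_argument('-f', '--foo', action=env, default='bar')\nprint(parser.parse_args([]).foo)  # 'env_bar'"
  },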
  {
    "library": "scikit-learn",
    "name": "_add_iterable_element",
    "source_code": "def _add_iterable_element(self, f, v, feature_names, vocab, *, fitting=True, transforming=False, indices=None, values=None):\n    for vv in v:\n        if isinstance(vv, str):\n            feature_name = '%s%s%s' % (f, self.separator, vv)\n            vv = 1\n        else:\n            raise TypeError(f'Unsupported type {type(vv)} in iterable value. Only iterables of string are supported.')\n        if fitting and feature_name not in vocab:\n            vocab[feature_name] = len(feature_names)\n            feature_names.append(feature_name)\n        if transforming and feature_name in vocab:\n            indices.append(vocab[feature_name])\n            values.append(self.dtype(vv))",
    "docstring": "Add feature names for iterable of strings",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\_dict_vectorizer.py",
    "ast_data": "FunctionDef name:_add_iterable_element arg:self arg:f arg:v arg:feature_names arg:vocab arguments arg arg arg arg arg arg arg arg arg For If Call Assign Assign Raise Call Call If BoolOp Compare Assign Call Call If BoolOp Compare Call Call Call"
  },
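  {
    "library": "scikit-learn",
    "name": "_add_iterable_element (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: iterable string values are expanded into one indicator feature per item by this helper. Assumes a scikit-learn recent enough (>= 1.0) to support iterable values and get_feature_names_out.",
    "example_code": "from sklearn.feature_extraction import DictVectorizer\n\nv = DictVectorizer(sparse=False)\nX = v.fit_transform([{'tags': ['news', 'sports']}, {'tags': ['news']}])\nprint(v.get_feature_names_out())  # ['tags=news' 'tags=sports']\nprint(X)                          # [[1. 1.], [1. 0.]]"
  },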
  {
    "library": "tensorflow",
    "name": "initial_value",
    "source_code": "@property\ndef initial_value(self):\n    if context.executing_eagerly():\n        raise RuntimeError('This property is not supported when eager execution is enabled.')\n    return self._initial_value",
    "docstring": "Returns the Tensor used as the initial value for the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:initial_value arg:self arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "type_name",
    "source_code": "@property\ndef type_name(self):\n    return capi.get_field_type_name(self.type)",
    "docstring": "Return the OGR field type name for this Field.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\field.py",
    "ast_data": "FunctionDef name:type_name arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_GetGradSource",
    "source_code": "def _GetGradSource(op_or_tensor):\n    name_tokens = op_or_tensor.name.split('/')\n    grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith('gradients')]\n    if not grad_pos:\n        raise ValueError(f\"Expected op/tensor name to start with gradients (excluding scope), got: {op_or_tensor.name}. This means that a tf.gradients op with this op in its dependency path has a custom name that does not start with 'gradients'. Please make sure all calls to tf.gradients that have non-empty `name` arguments use names that start with 'gradients'.\")\n    return '/'.join(name_tokens[:grad_pos[-1] + 1])",
    "docstring": "Identify which call to tf.gradients created this gradient op or tensor. TensorArray gradient calls use an accumulator TensorArray object. If multiple gradients are calculated and run in the same session, the multiple gradient nodes may accidentally flow through the same accumulator TensorArray. This double counting breaks the TensorArray gradient flow. The solution is to identify which gradient call this particular TensorArray*Grad is being called in, by looking at the input gradient tensor's name, and create or lookup an accumulator gradient TensorArray associated with this specific call. This solves any confusion and ensures different gradients from the same forward graph get their own accumulators. This function creates the unique label associated with the tf.gradients call that is used to create the gradient TensorArray. Args: op_or_tensor: or which is an input to a TensorArray*Grad call. Returns: A python string, the unique label associated with this particular gradients calculation. Raises: ValueError: If not called within a gradients calculation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_grad.py",
    "ast_data": "FunctionDef name:_GetGradSource arg:op_or_tensor arguments arg Assign Call Assign Call Call If Raise Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_pformat_subprocess",
    "source_code": "def _pformat_subprocess(command):\n    return command if isinstance(command, str) else ' '.join((shlex.quote(os.fspath(arg)) for arg in command))",
    "docstring": "Pretty-format a subprocess command for printing/logging purposes.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\cbook.py",
    "ast_data": "FunctionDef name:_pformat_subprocess arg:command arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_covariance",
    "source_code": "def get_covariance(self):\n    xp, _ = get_namespace(self.components_)\n    components_ = self.components_\n    exp_var = self.explained_variance_\n    if self.whiten:\n        components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])\n    exp_var_diff = exp_var - self.noise_variance_\n    exp_var_diff = xp.where(exp_var > self.noise_variance_, exp_var_diff, xp.asarray(0.0, device=device(exp_var), dtype=exp_var.dtype))\n    cov = components_.T * exp_var_diff @ components_\n    _fill_or_add_to_diagonal(cov, self.noise_variance_, xp)\n    return cov",
    "docstring": "Compute data covariance with the generative model. `` where S**2 contains the explained variances, and sigma2 contains the noise variances. Returns ------- cov : array of shape=(n_features, n_features) Estimated covariance of data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py",
    "ast_data": "FunctionDef name:get_covariance arg:self arguments arg Assign Call Assign Assign If Assign Call Assign Assign Call Compare Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "MonthArchiveView",
    "source_code": "class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):\n    template_name_suffix = '_archive_month'",
    "docstring": "List of objects published in a given month.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:MonthArchiveView Assign"
  },
  {
    "library": "pytorch",
    "name": "storage_meta",
    "source_code": "def storage_meta(self) -> Optional[StorageMeta]:\n    return None",
    "docstring": "Return the storage-specific metadata. This is used to store additional information in a checkpoint that can be useful for providing request-level observability. StorageMeta is passed to the `` during save calls. Returns None by default. TODO: provide an example",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:storage_meta arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "__setstate__",
    "source_code": "def __setstate__(self, state):\n    self.__dict__ = state\n    self._lock = threading.RLock()\n    self._descriptor_cache = weakref.WeakKeyDictionary()\n    self._key_for_call_stats = self._get_key_for_call_stats()",
    "docstring": "Restore from pickled state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:__setstate__ arg:self arg:state arguments arg arg Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y):\n    X, y = validate_data(self, X, y, multi_output=True, y_numeric=True)\n    n_features = X.shape[1]\n    X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit(X, y, None, self.precompute, self.fit_intercept, copy=True)\n    if y.ndim == 1:\n        y = y[:, np.newaxis]\n    if self.n_nonzero_coefs is None and self.tol is None:\n        self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)\n    elif self.tol is not None:\n        self.n_nonzero_coefs_ = None\n    else:\n        self.n_nonzero_coefs_ = self.n_nonzero_coefs\n    if Gram is False:\n        coef_, self.n_iter_ = orthogonal_mp(X, y, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, precompute=False, copy_X=True, return_n_iter=True)\n    else:\n        norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None\n        coef_, self.n_iter_ = orthogonal_mp_gram(Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True)\n    self.coef_ = coef_.T\n    self._set_intercept(X_offset, y_offset, X_scale)\n    return self",
    "docstring": "Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_omp.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call Assign Assign Call If Compare Assign If BoolOp Compare Compare Assign Call Call If Compare Assign Assign If Compare Assign Call Assign Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "create",
    "source_code": "def create():\n    return gen_data_flow_ops.tensor_array_v3(dtype=dtype, size=size, element_shape=element_shape, identical_element_shapes=infer_shape, dynamic_size=self._dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, name=scope)",
    "docstring": "Create the TensorArray op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:create arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "OpStrategy",
    "source_code": "class OpStrategy(StrategyType):\n\n    def __init__(self, strategies: list[PlacementStrategy]) -> None:\n        super().__init__()\n        self.strategies: list[PlacementStrategy] = strategies\n\n    def __str__(self) -> str:\n        strategy_list_str = ', '.join([str(strategy) for strategy in self.strategies])\n        mesh_shape = self.mesh_shape\n        return f'[{strategy_list_str}] @ mesh: {mesh_shape}'\n\n    def max_num_shards(self) -> int:\n        return max((strategy.output_spec.num_shards for strategy in self.strategies))\n\n    @property\n    def mesh(self):\n        return self.strategies[0].mesh\n\n    @property\n    def mesh_shape(self):\n        return self.strategies[0].mesh.shape\n\n    @property\n    def ndim(self):\n        return self.strategies[0].output_spec.ndim\n\n    @property\n    def shape(self):\n        return self.strategies[0].output_spec.shape",
    "docstring": "OpStrategy that consists of a list of placement strategies associated with the op",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py",
    "ast_data": "ClassDef name:OpStrategy FunctionDef name:__init__ arg:self arg:strategies arguments arg arg Call Call FunctionDef name:__str__ arg:self arguments arg Assign Call Call Assign Return return:yes FunctionDef name:max_num_shards arg:self arguments arg Return return:yes Call FunctionDef name:mesh arg:self arguments arg Return return:yes FunctionDef name:mesh_shape arg:self arguments arg Return return:yes FunctionDef name:ndim arg:self arguments arg Return return:yes FunctionDef name:shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "summarize",
    "source_code": "def summarize(self, highlight=None):\n    lines = [RL('Command-line configuration:', 'bold'), RL('')]\n    for name, val in self._config.items():\n        highlight_attr = 'bold' if name == highlight else None\n        line = RL('  ')\n        line += RL(name, ['underline', highlight_attr])\n        line += RL(': ')\n        line += RL(str(val), font_attr=highlight_attr)\n        lines.append(line)\n    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)",
    "docstring": "Get a text summary of the config. Args: highlight: A property name to highlight in the output. Returns: A output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_config.py",
    "ast_data": "FunctionDef name:summarize arg:self arg:highlight arguments arg arg Assign Call Call For Call Assign Compare Assign Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "brightness",
    "source_code": "def brightness(min_mag: float, max_mag: float) -> OperationBase:\n    return Brightness(None, 1.0, magnitude_range=(min_mag, max_mag))",
    "docstring": "Return brightness op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:brightness arg:min_mag arg:max_mag arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "AddValue",
    "source_code": "def AddValue(self, val):\n    if val.name in self._values:\n        result = self._external_values.get(val.name)\n        return val if result is None else result\n    result = val\n    self._values.add(val.name)\n    if self._outer_context:\n        result = self._outer_context.AddValue(val)\n        self._values.add(result.name)\n    self._external_values[val.name] = result\n    return result",
    "docstring": "Add to the current context and its outer context recursively.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\xla.py",
    "ast_data": "FunctionDef name:AddValue arg:self arg:val arguments arg arg If Compare Assign Call Return return:yes Compare Assign Call If Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_get_common_dtype",
    "source_code": "def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:\n    if len(set(dtypes)) == 1:\n        return self\n    else:\n        return None",
    "docstring": "Return the common dtype, if one exists. Used in implementation. This is for example used to determine the resulting dtype in a concat operation. If no common dtype exists, return None (which gives the other dtypes the chance to determine a common dtype). If all dtypes in the list return None, then the common dtype will be \"object\" dtype (this means it is never needed to return \"object\" dtype from this method itself). Parameters ---------- dtypes : list of dtypes The dtypes for which to determine a common dtype. This is a list of np.dtype or ExtensionDtype instances. Returns ------- Common dtype (np.dtype or ExtensionDtype) or None",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\base.py",
    "ast_data": "FunctionDef name:_get_common_dtype arg:self arg:dtypes arguments arg arg If Compare Call Call Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "get_commands",
    "source_code": "@functools.cache\ndef get_commands():\n    commands = {name: 'django.core' for name in find_commands(__path__[0])}\n    if not settings.configured:\n        return commands\n    for app_config in reversed(apps.get_app_configs()):\n        path = os.path.join(app_config.path, 'management')\n        commands.update({name: app_config.name for name in find_commands(path)})\n    return commands",
    "docstring": "Return a dictionary mapping command names to their callback applications. Look for a management.commands package in django.core, and in each installed application -- if a commands package exists, register all commands in that package. Core commands are always included. If a settings module has been specified, also include user-defined commands. The dictionary is in the format {command_name: app_name}. Key-value pairs from this dictionary can then be used in calls to load_command_class(app_name, command_name) The dictionary is cached on the first call and reused on subsequent calls.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\__init__.py",
    "ast_data": "FunctionDef name:get_commands arguments Assign Call If Return return:yes For Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_auto_imports",
    "source_code": "def get_auto_imports(self):\n    app_models_imports = [f'{model.__module__}.{model.__name__}' for model in reversed(apps.get_models()) if model.__module__]\n    return app_models_imports",
    "docstring": "Return a sequence of import paths for objects to be auto-imported. By default, import paths for models in INSTALLED_APPS are included, with models from earlier apps taking precedence in case of a name collision. For example, for an unchanged INSTALLED_APPS, this method returns: [ \"django.contrib.sessions.models.Session\", \"django.contrib.contenttypes.models.ContentType\", \"django.contrib.auth.models.User\", \"django.contrib.auth.models.Group\", \"django.contrib.auth.models.Permission\", \"django.contrib.admin.models.LogEntry\", ]",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\shell.py",
    "ast_data": "FunctionDef name:get_auto_imports arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_qconfig_info",
    "source_code": "def get_qconfig_info(self, model) -> dict[str, DetectorQConfigInfo]:\n    dynamic_static_info = self._generate_dict_info(model)\n    module_fqn_to_detector_qconfig_info = {}\n    for module_fqn in dynamic_static_info:\n        detector_qconfig_info = DetectorQConfigInfo(module_fqn)\n        dynamic_static_recommended: bool = dynamic_static_info[module_fqn][self.DEFAULT_DYNAMIC_REC_KEY]\n        detector_qconfig_info.is_activation_dynamic = dynamic_static_recommended\n        module_fqn_to_detector_qconfig_info[module_fqn] = detector_qconfig_info\n    return module_fqn_to_detector_qconfig_info",
    "docstring": "Returns the DetectorQConfigInfo for each module_fqn relevant Args model (nn.Module or subclass): model to find observer insertion points Returns a Dict mapping from unique observer fqns (where we want to insert them) to: A DetectorQConfigInfo with the information to generate a QConfig for a specific module",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_qconfig_info arg:self arg:model arguments arg arg Assign Call Assign For Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "write_outputs",
    "source_code": "def write_outputs(filename, headers, row, upload_to_benchmark_db: bool=True):\n    global disable_output\n    if disable_output:\n        return\n    output_csv(filename, headers, row)\n    if upload_to_benchmark_db:\n        output_json(filename, headers, row)",
    "docstring": "Write both CSV and JSON outputs using the original CSV output interface",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:write_outputs arg:filename arg:headers arg:row arg:upload_to_benchmark_db arguments arg arg arg arg If Return return:no Call If Call"
  },
  {
    "library": "pytorch",
    "name": "_broadcast_params_from_rank",
    "source_code": "def _broadcast_params_from_rank(self, rank: int):\n    assert not self._overlap_with_ddp, '`_broadcast_params_from_rank()` should not be used if `overlap_with_ddp=True`; instead, the broadcasting should happen in the DDP communication hook'\n    handles = []\n    if self.parameters_as_bucket_view:\n        for dev_i_buckets in self._buckets:\n            bucket = dev_i_buckets[rank]\n            global_rank = dist.distributed_c10d.get_global_rank(self.process_group, rank)\n            handles.append(dist.broadcast(tensor=bucket, src=global_rank, group=self.process_group, async_op=True))\n    else:\n        param_groups = self._partition_parameters()[rank]\n        global_rank = dist.distributed_c10d.get_global_rank(self.process_group, rank)\n        for param_group in param_groups:\n            handles.extend((dist.broadcast(tensor=param.data, src=global_rank, group=self.process_group, async_op=True) for param in param_group['params']))\n    return handles",
    "docstring": "Broadcast the shard of parameters from a given rank to all other ranks asynchronously. Arguments: rank (int): the source rank. Returns: A :class: of async work handles for the `` s performed to synchronize the parameters.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_broadcast_params_from_rank arg:self arg:rank arguments arg arg Assign If For Assign Assign Call Call Call Assign Call Assign Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "value",
    "source_code": "def value(self):\n    return pywrap_tfe.TFE_MonitoringCounterCellValue(self._cell)",
    "docstring": "Retrieves the current value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:value arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "user_deleted_form",
    "source_code": "def user_deleted_form(request, obj, formset, index, inline):\n    return inline.has_delete_permission(request, obj) and '{}-{}-DELETE'.format(formset.prefix, index) in request.POST",
    "docstring": "Return whether or not the user deleted the form.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:user_deleted_form arg:request arg:obj arg:formset arg:index arg:inline arguments arg arg arg arg arg Return return:yes BoolOp Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "cuFFTPlanCache",
    "source_code": "class cuFFTPlanCache:\n\n    def __init__(self, device_index):\n        self.device_index = device_index\n    size = cuFFTPlanCacheAttrContextProp(torch._cufft_get_plan_cache_size, '.size is a read-only property showing the number of plans currently in the cache. To change the cache capacity, set cufft_plan_cache.max_size.')\n    max_size = cuFFTPlanCacheAttrContextProp(torch._cufft_get_plan_cache_max_size, torch._cufft_set_plan_cache_max_size)\n\n    def clear(self):\n        return torch._cufft_clear_plan_cache(self.device_index)",
    "docstring": "Represent a specific plan cache for a specific . The attributes and , and method , can fetch and/ or change properties of the C++ cuFFT plan cache.",
    "type": "class",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "ClassDef name:cuFFTPlanCache FunctionDef name:__init__ arg:self arg:device_index arguments arg arg Assign Assign Call Assign Call FunctionDef name:clear arg:self arguments arg Return return:yes Call"
  },
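  {
    "library": "pytorch",
    "name": "cuFFTPlanCache (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: the cache is reached through torch.backends.cuda.cufft_plan_cache, which proxies to the current device's cuFFTPlanCache. Guarded because it is only meaningful with CUDA available.",
    "example_code": "import torch\n\nif torch.cuda.is_available():\n    cache = torch.backends.cuda.cufft_plan_cache  # current device's cache\n    print(cache.size)    # number of cached plans (read-only)\n    cache.max_size = 32  # change the cache capacity\n    cache.clear()"
  },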
  {
    "library": "django",
    "name": "_create_mime_attachment",
    "source_code": "def _create_mime_attachment(self, content, mimetype):\n    basetype, subtype = mimetype.split('/', 1)\n    if basetype == 'text':\n        encoding = self.encoding or settings.DEFAULT_CHARSET\n        attachment = SafeMIMEText(content, subtype, encoding)\n    elif basetype == 'message' and subtype == 'rfc822':\n        if isinstance(content, EmailMessage):\n            content = content.message()\n        elif not isinstance(content, Message):\n            content = message_from_bytes(force_bytes(content))\n        attachment = SafeMIMEMessage(content, subtype)\n    else:\n        attachment = MIMEBase(basetype, subtype)\n        attachment.set_payload(content)\n        Encoders.encode_base64(attachment)\n    return attachment",
    "docstring": "Convert the content, mimetype pair into a MIME attachment object. If the mimetype is message/rfc822, content may be an email.Message or EmailMessage object, as well as a str.",
    "type": "method",
    "file_path": "django\\django\\core\\mail\\message.py",
    "ast_data": "FunctionDef name:_create_mime_attachment arg:self arg:content arg:mimetype arguments arg arg arg Assign Call If Compare Assign BoolOp Assign Call If BoolOp Compare Compare If Call Assign Call If Call Assign Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_claims_locales_supported",
    "source_code": "def validate_claims_locales_supported(self):\n    validate_array_value(self, 'claims_locales_supported')",
    "docstring": "OPTIONAL. Languages and scripts supported for values in Claims being returned, represented as a JSON array of BCP47 [RFC5646] language tag values. Not all languages and scripts are necessarily supported for all Claim values.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\discovery\\models.py",
    "ast_data": "FunctionDef name:validate_claims_locales_supported arg:self arguments arg Call"
  },
  {
    "library": "kornia",
    "name": "TranslateY",
    "source_code": "class TranslateY(OperationBase):\n\n    def __init__(self, initial_magnitude: Optional[float]=0.2, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.0, 0.5), temperature: float=0.1, symmetric_megnitude: bool=True) -> None:\n        if symmetric_megnitude and magnitude_range[0] < 0:\n            raise ValueError(f'Lower bound of {self.__class__.__name__} is a symmetric operation. The lower bound must above 0. Got {magnitude_range[0]}.')\n        super().__init__(K.RandomTranslate(None, magnitude_range, same_on_batch=False, p=initial_probability, align_corners=True), initial_magnitude=[('translate_y', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude)",
    "docstring": "Apply translate operation along y-axis. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:TranslateY FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg If BoolOp Compare Raise Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "trailing_slash",
    "source_code": "def trailing_slash(missing=True, extra=False, status=None, debug=False):\n    request = cherrypy.serving.request\n    pi = request.path_info\n    if debug:\n        cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' % (request.is_index, missing, extra, pi), 'TOOLS.TRAILING_SLASH')\n    if request.is_index is True:\n        if missing:\n            if not pi.endswith('/'):\n                new_url = cherrypy.url(pi + '/', request.query_string)\n                raise cherrypy.HTTPRedirect(new_url, status=status or 301)\n    elif request.is_index is False:\n        if extra:\n            if pi.endswith('/') and pi != '/':\n                new_url = cherrypy.url(pi[:-1], request.query_string)\n                raise cherrypy.HTTPRedirect(new_url, status=status or 301)",
    "docstring": "Redirect if path_info has (missing|extra) trailing slash.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:trailing_slash arg:missing arg:extra arg:status arg:debug arguments arg arg arg arg Assign Assign If Call If Compare If If Call Assign Call Raise Call BoolOp If Compare If If BoolOp Call Compare Assign Call Raise Call BoolOp"
  },
  {
    "library": "authlib",
    "name": "authenticate_token",
    "source_code": "def authenticate_token(self, token_string):\n    raise NotImplementedError()",
    "docstring": "A method to query token from database with the given token string. Developers MUST re-implement this method. For instance:: def authenticate_token(self, token_string): return get_token_from_database(token_string) :param token_string: A string to represent the access_token. :return: token",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\resource_protector.py",
    "ast_data": "FunctionDef name:authenticate_token arg:self arg:token_string arguments arg arg Raise Call"
  },
  {
    "library": "scrapy",
    "name": "ItemFilter",
    "source_code": "class ItemFilter:\n    feed_options: dict[str, Any] | None\n    item_classes: tuple[type, ...]\n\n    def __init__(self, feed_options: dict[str, Any] | None) -> None:\n        self.feed_options = feed_options\n        if feed_options is not None:\n            self.item_classes = tuple((load_object(item_class) for item_class in feed_options.get('item_classes') or ()))\n        else:\n            self.item_classes = ()\n\n    def accepts(self, item: Any) -> bool:\n        if self.item_classes:\n            return isinstance(item, self.item_classes)\n        return True",
    "docstring": "This will be used by FeedExporter to decide if an item should be allowed to be exported to a particular feed. :param feed_options: feed specific options passed from FeedExporter :type feed_options: dict",
    "type": "class",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "ClassDef name:ItemFilter FunctionDef name:__init__ arg:self arg:feed_options arguments arg arg Assign If Compare Assign Call Call BoolOp Call Assign FunctionDef name:accepts arg:self arg:item arguments arg arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DatasetCreator",
    "source_code": "class DatasetCreator(object):\n\n    def __init__(self, dataset_fn, input_options=None):\n        if not callable(dataset_fn):\n            raise TypeError('`dataset_fn` for `DatasetCreator` must be a `callable`.')\n        if input_options and (not isinstance(input_options, distribute_lib.InputOptions)):\n            raise TypeError('`input_options` for `DatasetCreator` must be a `tf.distribute.InputOptions`.')\n        self.dataset_fn = dataset_fn\n        self.input_options = input_options\n\n    def __call__(self, *args, **kwargs):\n        dataset = self.dataset_fn(*args, **kwargs)\n        if not isinstance(dataset, data_types.DatasetV2):\n            raise TypeError('The `callable` provided to `DatasetCreator` must return a Dataset.')\n        return dataset",
    "docstring": "Object that returns a upon invoking. is designated as a supported type for , or the input, in . Pass an instance of this class to when using a callable (with a argument) that returns a . usage with is intended to work across all s, as long as is used at model creation: Note: When using , argument in must be provided as the cardinality of such input cannot be inferred. Args: dataset_fn: A callable that takes a single argument of type , which is used for batch size calculation and cross-worker input pipeline sharding (if neither is needed, the parameter can be ignored in the ), and returns a . input_options: Optional , used for specific options when used with distribution, for example, whether to prefetch dataset elements to accelerator device memory or host device memory, and prefetch buffer size in the replica device memory. No effect if not used with distributed training. See for more information.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\dataset_creator.py",
    "ast_data": "ClassDef name:DatasetCreator FunctionDef name:__init__ arg:self arg:dataset_fn arg:input_options arguments arg arg arg If Call Raise Call If BoolOp Call Raise Call Assign Assign FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call If Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "remove_extra_line_nums",
    "source_code": "def remove_extra_line_nums(instructions):\n    cur_line_no = None\n\n    def remove_line_num(inst):\n        nonlocal cur_line_no\n        if inst.starts_line is None:\n            return\n        elif inst.starts_line == cur_line_no:\n            inst.starts_line = None\n        else:\n            cur_line_no = inst.starts_line\n    for inst in instructions:\n        remove_line_num(inst)",
    "docstring": "Remove extra starts line properties before packing bytecode",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_analysis.py",
    "ast_data": "FunctionDef name:remove_extra_line_nums arg:instructions arguments arg Assign FunctionDef name:remove_line_num arg:inst arguments arg If Compare Return return:no If Compare Assign Assign For Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.embedding_.shape[1]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "derive",
    "source_code": "@abc.abstractmethod\ndef derive(self, key_material: bytes) -> bytes:\n    pass",
    "docstring": "Deterministically generates and returns a new key based on the existing key material.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\kdf\\__init__.py",
    "ast_data": "FunctionDef name:derive arg:self arg:key_material arguments arg arg"
  },
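  {
    "library": "cryptography",
    "name": "derive (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: PBKDF2HMAC is one concrete KDF implementing this abstract derive(). Assumes only that cryptography is installed.",
    "example_code": "import os\n\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\n\nsalt = os.urandom(16)\nkdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=480000)\nkey = kdf.derive(b'my password')  # 32 bytes; a KDF instance is single-use"
  },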
  {
    "library": "matplotlib",
    "name": "set_capstyle",
    "source_code": "@_docstring.interpd\ndef set_capstyle(self, cs):\n    self._capstyle = CapStyle(cs)",
    "docstring": "Set the for the collection (for all its elements). Parameters ---------- cs : or %(CapStyle)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_capstyle arg:self arg:cs arguments arg arg Assign Call"
  },
  {
    "library": "sphinx",
    "name": "disconnect",
    "source_code": "def disconnect(self, listener_id: int) -> None:\n    logger.debug('[app] disconnecting event: [id=%s]', listener_id)\n    self.events.disconnect(listener_id)",
    "docstring": "Unregister callback by *listener_id*. :param listener_id: A listener_id that :meth: returns",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:listener_id arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_gen_roots_and_weights",
    "source_code": "def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):\n    k = np.arange(n, dtype='d')\n    c = np.zeros((2, n))\n    c[0, 1:] = bn_func(k[1:])\n    c[1, :] = an_func(k)\n    x = linalg.eigvals_banded(c, overwrite_a_band=True)\n    y = f(n, x)\n    dy = df(n, x)\n    x -= y / dy\n    fm = f(n - 1, x)\n    log_fm = np.log(np.abs(fm))\n    log_dy = np.log(np.abs(dy))\n    fm /= np.exp((log_fm.max() + log_fm.min()) / 2.0)\n    dy /= np.exp((log_dy.max() + log_dy.min()) / 2.0)\n    w = 1.0 / (fm * dy)\n    if symmetrize:\n        w = (w + w[::-1]) / 2\n        x = (x - x[::-1]) / 2\n    w *= mu0 / w.sum()\n    if mu:\n        return (x, w, mu0)\n    else:\n        return (x, w)",
    "docstring": "[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu) Returns the roots (x) of an nth order orthogonal polynomial, and weights (w) to use in appropriate Gaussian quadrature with that orthogonal polynomial. The polynomials have the recurrence relation P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x) an_func(n) should return A_n sqrt_bn_func(n) should return sqrt(B_n) mu ( = h_0 ) is the integral of the weight over the orthogonal interval",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:_gen_roots_and_weights arg:n arg:mu0 arg:an_func arg:bn_func arg:f arg:df arg:symmetrize arg:mu arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call Call Call Call Assign If Assign Assign Call If Return return:yes Return return:yes"
  },
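  {
    "library": "scipy",
    "name": "_gen_roots_and_weights (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: the public roots_* helpers built on this machinery return nodes and weights for Gaussian quadrature; here a 5-point Gauss-Legendre rule integrates x**2 over [-1, 1] exactly.",
    "example_code": "import numpy as np\nfrom scipy.special import roots_legendre\n\nx, w = roots_legendre(5)\nprint(np.dot(w, x ** 2))  # ~0.6667 == 2/3"
  },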
  {
    "library": "numpy",
    "name": "translate",
    "source_code": "@set_module('numpy.strings')\n@array_function_dispatch(_translate_dispatcher)\ndef translate(a, table, deletechars=None):\n    a_arr = np.asarray(a)\n    if issubclass(a_arr.dtype.type, np.str_):\n        return _vec_string(a_arr, a_arr.dtype, 'translate', (table,))\n    else:\n        return _vec_string(a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))",
    "docstring": "For each element in , return a copy of the string where all characters occurring in the optional argument are removed, and the remaining characters have been mapped through the given translation table. Calls :meth: element-wise. Parameters ---------- a : array-like, with or dtype table : str of length 256 deletechars : str Returns ------- out : ndarray Output array of str or unicode, depending on input type See Also -------- str.translate Examples -------- >>> import numpy as np >>> a = np.array(['a1b c', '1bca', 'bca1']) >>> table = a[0].maketrans('abc', '123') >>> deletechars = ' ' >>> np.char.translate(a, table, deletechars) array(['112 3', '1231', '2311'], dtype='<U5')",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:translate arg:a arg:table arg:deletechars arguments arg arg arg Assign Call If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_unsafe_unfinalize",
    "source_code": "def _unsafe_unfinalize(self) -> None:\n    self._finalized = False",
    "docstring": "Opposite of . Internal interface. NOTE: Unfinalizing a graph could have negative impact on performance, especially in a multi-threaded environment. Unfinalizing a graph when it is in use by a Session may lead to undefined behavior. Ensure that all sessions using a graph are closed before calling this method.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_unsafe_unfinalize arg:self arguments arg Assign"
  },
  {
    "library": "kornia",
    "name": "get_boxes_shape",
    "source_code": "def get_boxes_shape(self) -> tuple[torch.Tensor, torch.Tensor]:\n    boxes_xywh = cast(torch.Tensor, self.to_tensor('xywh', as_padded_sequence=True))\n    widths, heights = (boxes_xywh[..., 2], boxes_xywh[..., 3])\n    return (heights, widths)",
    "docstring": "Compute boxes heights and widths. Returns: - Boxes heights, shape of :math: or :math:. - Boxes widths, shape of :math: or :math:. Example: >>> boxes_xyxy = torch.tensor([[[1,1,2,2],[1,1,3,2]]]) >>> boxes = Boxes.from_tensor(boxes_xyxy) >>> boxes.get_boxes_shape() (tensor([[1., 1.]]), tensor([[1., 2.]]))",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:get_boxes_shape arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "connectFailed",
    "source_code": "def connectFailed(self, reason: Failure) -> None:\n    self._tunnelReadyDeferred.errback(reason)",
    "docstring": "Propagates the errback to the appropriate deferred.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\downloader\\handlers\\http11.py",
    "ast_data": "FunctionDef name:connectFailed arg:self arg:reason arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "_read_pyarrow",
    "source_code": "def _read_pyarrow(self) -> DataFrame:\n    pyarrow_json = import_optional_dependency('pyarrow.json')\n    options = None\n    if isinstance(self.dtype, dict):\n        pa = import_optional_dependency('pyarrow')\n        fields = []\n        for field, dtype in self.dtype.items():\n            pd_dtype = pandas_dtype(dtype)\n            if isinstance(pd_dtype, ArrowDtype):\n                fields.append((field, pd_dtype.pyarrow_dtype))\n        schema = pa.schema(fields)\n        options = pyarrow_json.ParseOptions(explicit_schema=schema, unexpected_field_behavior='infer')\n    pa_table = pyarrow_json.read_json(self.data, parse_options=options)\n    df = arrow_table_to_pandas(pa_table, dtype_backend=self.dtype_backend)\n    return df",
    "docstring": "Read JSON using the pyarrow engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\json\\_json.py",
    "ast_data": "FunctionDef name:_read_pyarrow arg:self arguments arg Assign Call Assign If Call Assign Call Assign For Call Assign Call If Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
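  {
    "library": "pandas",
    "name": "_read_pyarrow (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: this private reader backs pd.read_json(..., engine='pyarrow'), which currently requires line-delimited JSON (lines=True) and a pyarrow install.",
    "example_code": "import io\n\nimport pandas as pd\n\nbuf = io.BytesIO(b'{\"a\": 1, \"b\": \"x\"}\\n{\"a\": 2, \"b\": \"y\"}\\n')\ndf = pd.read_json(buf, lines=True, engine='pyarrow')\nprint(df)"
  },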
  {
    "library": "pytorch",
    "name": "PerAxis",
    "source_code": "@dataclass(frozen=True)\nclass PerAxis(Granularity):\n    axis: int",
    "docstring": "Represents per-axis granularity in quantization. This granularity type calculates different quantization parameters along a specified axis of the tensor. For example if the input tensor is shape [8, 16] and axis=0, then the quantization parameters are calculated for each row of the tensor. Giving a total of 8 quantization parameters. Attributes: axis (int): The axis along which reduction is performed.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:PerAxis Call"
  },
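  {
    "library": "pytorch",
    "name": "PerAxis (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: a hand-rolled illustration of what per-axis granularity means for the docstring's [8, 16] example, not the observer's actual implementation.",
    "example_code": "import torch\n\nx = torch.randn(8, 16)\n# per-axis with axis=0: reduce over every other dim, one param set per row\nmn = x.amin(dim=1)\nmx = x.amax(dim=1)\nscale = (mx - mn) / 255.0             # 8 scales for an 8-bit range\nzero_point = (-mn / scale).round()    # 8 zero points\nprint(scale.shape, zero_point.shape)  # torch.Size([8]) torch.Size([8])"
  },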
  {
    "library": "kornia",
    "name": "quaternion_to_axis_angle",
    "source_code": "def quaternion_to_axis_angle(quaternion: Tensor) -> Tensor:\n    if not torch.is_tensor(quaternion):\n        raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}')\n    if not quaternion.shape[-1] == 4:\n        raise ValueError(f'Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}')\n    q1: Tensor = tensor([])\n    q2: Tensor = tensor([])\n    q3: Tensor = tensor([])\n    cos_theta: Tensor = tensor([])\n    cos_theta = quaternion[..., 0]\n    q1 = quaternion[..., 1]\n    q2 = quaternion[..., 2]\n    q3 = quaternion[..., 3]\n    sin_squared_theta: Tensor = q1 * q1 + q2 * q2 + q3 * q3\n    sin_theta: Tensor = torch.sqrt(sin_squared_theta)\n    two_theta: Tensor = 2.0 * where(cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta))\n    k_pos: Tensor = two_theta / sin_theta\n    k_neg: Tensor = 2.0 * torch.ones_like(sin_theta)\n    k: Tensor = where(sin_squared_theta > 0.0, k_pos, k_neg)\n    axis_angle: Tensor = torch.zeros_like(quaternion)[..., :3]\n    axis_angle[..., 0] += q1 * k\n    axis_angle[..., 1] += q2 * k\n    axis_angle[..., 2] += q3 * k\n    return axis_angle",
    "docstring": "Convert quaternion vector to axis angle of rotation in radians. The quaternion should be in (w, x, y, z) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion: tensor with quaternions. Return: tensor with axis angle of rotation. Shape: - Input: :math: where means, any number of dimensions - Output: :math: Example: >>> quaternion = tensor((1., 0., 0., 0.)) >>> quaternion_to_axis_angle(quaternion) tensor([0., 0., 0.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:quaternion_to_axis_angle arg:quaternion arguments arg If Call Raise Call Call If Compare Raise Call Call Call Call Call Assign Assign Assign Assign Call Call Compare Call Call Call Call Compare Call Return return:yes"
  },
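  {
    "library": "kornia",
    "name": "quaternion_to_axis_angle (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: a non-identity check, a 90 degree rotation about x in (w, x, y, z) format. Assumes kornia and torch are installed.",
    "example_code": "import math\n\nimport torch\nfrom kornia.geometry.conversions import quaternion_to_axis_angle\n\nhalf = math.pi / 4  # theta / 2 for theta = 90 degrees\nq = torch.tensor([math.cos(half), math.sin(half), 0.0, 0.0])\nprint(quaternion_to_axis_angle(q))  # ~tensor([1.5708, 0.0000, 0.0000])"
  },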
  {
    "library": "pandas",
    "name": "set_locs",
    "source_code": "def set_locs(self, locs) -> None:\n    self.locs = locs\n    vmin, vmax = tuple(self.axis.get_view_interval())\n    if vmax < vmin:\n        vmin, vmax = (vmax, vmin)\n    self._set_default_format(vmin, vmax)",
    "docstring": "Sets the locations of the ticks",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py",
    "ast_data": "FunctionDef name:set_locs arg:self arg:locs arguments arg arg Assign Assign Call Call If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@property\ndef device(self):\n    raise NotImplementedError",
    "docstring": "The device of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:device arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "_calc_max_cols_fitted",
    "source_code": "def _calc_max_cols_fitted(self) -> int | None:\n    if not self._is_in_terminal():\n        return self.max_cols\n    width, _ = get_terminal_size()\n    if self._is_screen_narrow(width):\n        return width\n    else:\n        return self.max_cols",
    "docstring": "Number of columns fitting the screen.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:_calc_max_cols_fitted arg:self arguments arg If Call Return return:yes Assign Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_dtensor",
    "source_code": "def is_dtensor(self, tensor: Any) -> bool:\n    if not context.executing_eagerly():\n        raise RuntimeError('`is_dtensor` must be called eagerly.')\n    if not tensor_util.is_tensor(tensor):\n        return False\n    if _pywrap_utils.IsVariable(tensor):\n        tensor = tensor._handle\n    return _pywrap_dtensor_device.IsDTensor(context.context()._handle, tensor, self._device_info)",
    "docstring": "Check whether the input tensor is a DTensor. In Python, a DTensor has the same type as a . This method will let you check and handle the tensor differently if a tf.Tensor is a DTensor. Args: tensor: an object to be checked. Returns: bool, True if the given tensor is a DTensor. Raises: RuntimeError: When not called eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:is_dtensor arg:self arg:tensor arguments arg arg If Call Raise Call If Call Return return:yes If Call Assign Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "paragraphs",
    "source_code": "def paragraphs(count, common=True):\n    paras = []\n    for i in range(count):\n        if common and i == 0:\n            paras.append(COMMON_P)\n        else:\n            paras.append(paragraph())\n    return paras",
    "docstring": "Return a list of paragraphs as returned by paragraph(). If is True, then the first paragraph will be the standard 'lorem ipsum' paragraph. Otherwise, the first paragraph will be random Latin text. Either way, subsequent paragraphs will be random Latin text.",
    "type": "function",
    "file_path": "django\\django\\utils\\lorem_ipsum.py",
    "ast_data": "FunctionDef name:paragraphs arg:count arg:common arguments arg arg Assign For Call If BoolOp Compare Call Call Call Return return:yes"
  },
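  {
    "library": "django",
    "name": "paragraphs (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: django.utils.lorem_ipsum works without configured settings, so this runs standalone with django installed.",
    "example_code": "from django.utils.lorem_ipsum import paragraphs\n\nparas = paragraphs(2, common=True)\nprint(paras[0].startswith('Lorem ipsum'))  # True: first is the fixed text\nprint(len(paras))                          # 2"
  },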
  {
    "library": "tensorflow",
    "name": "_ListFetchMapper",
    "source_code": "class _ListFetchMapper(_FetchMapper):\n\n    def __init__(self, fetches):\n        if isinstance(fetches, wrapt.ObjectProxy):\n            self._fetch_type = type(fetches.__wrapped__)\n        else:\n            self._fetch_type = type(fetches)\n        self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]\n        self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)\n\n    def unique_fetches(self):\n        return self._unique_fetches\n\n    def build_results(self, values):\n        results = []\n        for m, vi in zip(self._mappers, self._value_indices):\n            results.append(m.build_results([values[j] for j in vi]))\n        if issubclass(self._fetch_type, list):\n            return results\n        elif self._fetch_type == tuple:\n            return tuple(results)\n        else:\n            return self._fetch_type(*results)",
    "docstring": "Fetch mapper for lists, tuples, and namedtuples.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "ClassDef name:_ListFetchMapper FunctionDef name:__init__ arg:self arg:fetches arguments arg arg If Call Assign Call Assign Call Assign Call Assign Call FunctionDef name:unique_fetches arg:self arguments arg Return return:yes FunctionDef name:build_results arg:self arg:values arguments arg arg Assign For Call Call Call If Call Return return:yes If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_tupleize_axis_indexer",
    "source_code": "def _tupleize_axis_indexer(ndim: int, axis: AxisInt, key) -> tuple:\n    new_key = [slice(None)] * ndim\n    new_key[axis] = key\n    return tuple(new_key)",
    "docstring": "If we have an axis, adapt the given key to be axis-independent.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexing.py",
    "ast_data": "FunctionDef name:_tupleize_axis_indexer arg:ndim arg:axis arg:key arguments arg arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__iter__",
    "source_code": "def __iter__(self) -> Iterator:\n    if self.ndim == 1:\n        return iter(self._internal_get_values().tolist())\n    else:\n        return (self[n] for n in range(len(self)))",
    "docstring": "Returns an Iterator over the values of this Categorical.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg If Compare Return return:yes Call Call Call Return return:yes Call Call"
  },
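  {
    "library": "pandas",
    "name": "Categorical.__iter__ (usage sketch)",
    "type": "editor_example",
    "note": "Editor-added hedged sketch, not part of the scraped records: iterating a Categorical yields its values (not its codes). Assumes only pandas is installed.",
    "example_code": "import pandas as pd\n\ncat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b'])\nprint(list(cat))  # ['a', 'b', 'a']"
  },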
  {
    "library": "tensorflow",
    "name": "_getattribute",
    "source_code": "def _getattribute(self, name):\n    func__fastdict_insert = object.__getattribute__(self, '_fastdict_insert')\n    if name == 'lite':\n        if self._tfmw_has_lite:\n            attr = self._tfmw_import_module(name)\n            setattr(self._tfmw_wrapped_module, 'lite', attr)\n            func__fastdict_insert(name, attr)\n            return attr\n    attr = object.__getattribute__(self, name)\n    if name.startswith('__') or name.startswith('_tfmw_') or name.startswith('_fastdict_'):\n        func__fastdict_insert(name, attr)\n        return attr\n    if not (self._tfmw_print_deprecation_warnings and self._tfmw_add_deprecation_warning(name, attr)):\n        func__fastdict_insert(name, attr)\n    return attr",
    "docstring": "Imports and caches pre-defined API. Warns if necessary. This method is a replacement for __getattribute__(). It will be added into the extended python module as a callback to reduce API overhead.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\module_wrapper.py",
    "ast_data": "FunctionDef name:_getattribute arg:self arg:name arguments arg arg Assign Call If Compare If Assign Call Call Call Return return:yes Assign Call If BoolOp Call Call Call Call Return return:yes If BoolOp Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "tick_values",
    "source_code": "def tick_values(self, vmin, vmax):\n    if self.nbins is None:\n        return self.locs\n    step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)\n    ticks = self.locs[::step]\n    for i in range(1, step):\n        ticks1 = self.locs[i::step]\n        if np.abs(ticks1).min() < np.abs(ticks).min():\n            ticks = ticks1\n    return self.raise_if_exceeds(ticks)",
    "docstring": "Return the locations of the ticks. .. note:: Because the values are fixed, *vmin* and *vmax* are not used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:tick_values arg:self arg:vmin arg:vmax arguments arg arg arg If Compare Return return:yes Assign Call Call Call Call Assign For Call Assign If Compare Call Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_join_procs_with_timeout",
    "source_code": "def _join_procs_with_timeout(self, timeout: float):\n    end = time.monotonic() + timeout\n    for process in self.processes:\n        time_to_wait = max(0, end - time.monotonic())\n        process.join(time_to_wait)",
    "docstring": "Attempt to join all processes with a shared timeout.",
    "type": "method",
    "file_path": "pytorch\\torch\\multiprocessing\\spawn.py",
    "ast_data": "FunctionDef name:_join_procs_with_timeout arg:self arg:timeout arguments arg arg Assign Call For Assign Call Call Call"
  },
  {
    "library": "numpy",
    "name": "IntelEM64TCCompilerW",
    "source_code": "class IntelEM64TCCompilerW(IntelCCompilerW):\n    compiler_type = 'intelemw'\n\n    def __init__(self, verbose=0, dry_run=0, force=0):\n        MSVCCompiler.__init__(self, verbose, dry_run, force)\n        version_match = simple_version_match(start='Intel\\\\(R\\\\).*?64,')\n        self.__version = version_match",
    "docstring": "A modified Intel x86_64 compiler compatible with a 64bit MSVC-built Python.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\intelccompiler.py",
    "ast_data": "ClassDef name:IntelEM64TCCompilerW Assign FunctionDef name:__init__ arg:self arg:verbose arg:dry_run arg:force arguments arg arg arg arg Call Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "list_backends",
    "source_code": "def list_backends(exclude_tags=('debug', 'experimental')) -> list[str]:\n    _lazy_import()\n    exclude_tags = set(exclude_tags or ())\n    backends = [name for name in _BACKENDS.keys() if name not in _COMPILER_FNS or not exclude_tags.intersection(_COMPILER_FNS[name]._tags)]\n    return sorted(backends)",
    "docstring": "Return valid strings that can be passed to: torch.compile(..., backend=\"name\")",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\registry.py",
    "ast_data": "FunctionDef name:list_backends arg:exclude_tags arguments arg Call Assign Call BoolOp Assign Call BoolOp Compare Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, args=None):\n    self.root = Path(__file__).parent.absolute()\n    if not args:\n        return\n    self.build = Path(args.build_dir).resolve()\n    if args.install_prefix:\n        self.installed = Path(args.install_prefix).resolve()\n    else:\n        self.installed = self.build.parent / (self.build.stem + '-install')\n    self.site = self.get_site_packages()",
    "docstring": ":params args: object like Context(build_dir, install_prefix)",
    "type": "method",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:args arguments arg arg Assign Call Call If Return return:no Assign Call Call If Assign Call Call Assign Assign Call"
  },
  {
    "library": "django",
    "name": "extent",
    "source_code": "@property\ndef extent(self):\n    env = OGREnvelope()\n    capi.get_extent(self.ptr, byref(env), 1)\n    return Envelope(env)",
    "docstring": "Return the extent (an Envelope) of this layer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py",
    "ast_data": "FunctionDef name:extent arg:self arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "chebpts1",
    "source_code": "def chebpts1(npts):\n    _npts = int(npts)\n    if _npts != npts:\n        raise ValueError('npts must be integer')\n    if _npts < 1:\n        raise ValueError('npts must be >= 1')\n    x = 0.5 * np.pi / _npts * np.arange(-_npts + 1, _npts + 1, 2)\n    return np.sin(x)",
    "docstring": "Chebyshev points of the first kind. The Chebyshev points of the first kind are the points ``. Parameters ---------- npts : int Number of sample points desired. Returns ------- pts : ndarray The Chebyshev points of the first kind. See Also -------- chebpts2",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\chebyshev.py",
    "ast_data": "FunctionDef name:chebpts1 arg:npts arguments arg Assign Call If Compare Raise Call If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "register_nonce_hooks",
    "source_code": "def register_nonce_hooks(authorization_server, cache, key_prefix='nonce:', expires=86400):\n    exists_nonce = create_exists_nonce_func(cache, key_prefix, expires)\n    authorization_server.register_hook('exists_nonce', exists_nonce)",
    "docstring": "Register nonce related hooks to authorization server. :param authorization_server: AuthorizationServer instance :param cache: Cache instance :param key_prefix: key prefix for temporary credential :param expires: Expire time for nonce",
    "type": "function",
    "file_path": "authlib\\authlib\\integrations\\flask_oauth1\\cache.py",
    "ast_data": "FunctionDef name:register_nonce_hooks arg:authorization_server arg:cache arg:key_prefix arg:expires arguments arg arg arg arg Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "present",
    "source_code": "def present(x):\n    return x is not None",
    "docstring": "This is a Python equivalent of the Fortran 'present' function for optional arguments.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\present.py",
    "ast_data": "FunctionDef name:present arg:x arguments arg Return return:yes Compare"
  },
  {
    "library": "pytorch",
    "name": "_free",
    "source_code": "def _free(self, buffer: Union[CodegenBuffer, ir.TorchBindObject]) -> None:\n    name = buffer.get_name()\n    del self.buffer_to_node[name]",
    "docstring": "Removes the buffer from the symbol table.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper_fxir.py",
    "ast_data": "FunctionDef name:_free arg:self arg:buffer arguments arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_estimate_data_distribution",
    "source_code": "def _estimate_data_distribution(c, num_examples_per_class_seen):\n    num_classes = num_examples_per_class_seen.get_shape()[0]\n    num_examples_per_class_seen = math_ops.add(num_examples_per_class_seen, math_ops.reduce_sum(array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))\n    init_prob_estimate = math_ops.truediv(num_examples_per_class_seen, math_ops.reduce_sum(num_examples_per_class_seen))\n    dist = math_ops.cast(init_prob_estimate, dtypes.float32)\n    return (num_examples_per_class_seen, dist)",
    "docstring": "Estimate data distribution as labels are seen. Args: c: The class labels. Type , shape . num_examples_per_class_seen: Type , shape , containing counts. Returns: num_examples_per_lass_seen: Updated counts. Type , shape . dist: The updated distribution. Type , shape .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_estimate_data_distribution arg:c arg:num_examples_per_class_seen arguments arg arg Assign Call Assign Call Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_tensors",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef from_tensors(self, tensors):\n    return super().from_tensors(tensors)",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:from_tensors arg:self arg:tensors arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, features: Any, weights: Optional[Any]=None) -> Any:\n    if not self._built:\n        self.build()\n    return self.embedding_lookup(features, weights)",
    "docstring": "Call the mid level api to do embedding lookup.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_base.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:features arg:weights arguments arg arg arg If Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "SphinxWarningLogRecord",
    "source_code": "class SphinxWarningLogRecord(SphinxLogRecord):\n\n    @property\n    def prefix(self) -> str:\n        if self.levelno >= logging.CRITICAL:\n            return 'CRITICAL: '\n        elif self.levelno >= logging.ERROR:\n            return 'ERROR: '\n        else:\n            return 'WARNING: '",
    "docstring": "Warning log record class supporting location",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:SphinxWarningLogRecord FunctionDef name:prefix arg:self arguments arg If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_preload_simple_restoration",
    "source_code": "def _preload_simple_restoration(self, name):\n    deferred_dependencies_list = self._deferred_dependencies.get(name, ())\n    if not deferred_dependencies_list:\n        return\n    for checkpoint_position in deferred_dependencies_list:\n        if not checkpoint_position.is_simple_variable():\n            return None\n    checkpoint_position = max(deferred_dependencies_list, key=lambda restore: restore.checkpoint.restore_uid)\n    return CheckpointInitialValueCallable(checkpoint_position=checkpoint_position)",
    "docstring": "Return a dependency's value for restore-on-create. Note the restoration is not deleted; if for some reason preload is called and then not assigned to the variable (for example because a custom getter overrides the initializer), the assignment will still happen once the variable is tracked (determined based on checkpoint.restore_uid). Args: name: The object-local name of the dependency holding the variable's value. Returns: An callable for use as a variable's initializer/initial_value, or None if one should not be set (either because there was no variable with this name in the checkpoint or because it needs more complex deserialization). Any non-trivial deserialization will happen when the variable object is tracked.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_preload_simple_restoration arg:self arg:name arguments arg arg Assign Call If Return return:no For If Call Return return:no Assign Call arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "sequence_categorical_column_with_vocabulary_file",
    "source_code": "@doc_controls.header(_FEATURE_COLUMN_DEPRECATION_WARNING)\n@tf_export('feature_column.sequence_categorical_column_with_vocabulary_file')\n@deprecation.deprecated(None, _FEATURE_COLUMN_DEPRECATION_RUNTIME_WARNING)\ndef sequence_categorical_column_with_vocabulary_file(key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0, default_value=None, dtype=dtypes.string):\n    return fc.SequenceCategoricalColumn(fc.categorical_column_with_vocabulary_file(key=key, vocabulary_file=vocabulary_file, vocabulary_size=vocabulary_size, num_oov_buckets=num_oov_buckets, default_value=default_value, dtype=dtype))",
    "docstring": "A sequence of categorical terms where ids use a vocabulary file. Pass this to or to convert sequence categorical data into dense representation for input to sequence NN, such as RNN. Example: Args: key: A unique string identifying the input feature. vocabulary_file: The vocabulary file name. vocabulary_size: Number of the elements in the vocabulary. This must be no greater than length of , if less than length, later values are ignored. If None, it is set to the length of . num_oov_buckets: Non-negative integer, the number of out-of-vocabulary buckets. All out-of-vocabulary inputs will be assigned IDs in the range based on a hash of the input value. A positive can not be specified with . default_value: The integer ID value to return for out-of-vocabulary feature values, defaults to . This can not be specified with a positive . dtype: The type of features. Only string and integer types are supported. Returns: A . Raises: ValueError: is missing or cannot be opened. ValueError: is missing or < 1. ValueError: is a negative integer. ValueError: and are both specified. ValueError: is neither string nor integer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:sequence_categorical_column_with_vocabulary_file arg:key arg:vocabulary_file arg:vocabulary_size arg:num_oov_buckets arg:default_value arg:dtype arguments arg arg arg arg arg arg Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "patch_so",
    "source_code": "def patch_so(srcs_dir: str) -> None:\n    to_patch = {'tensorflow/python/_pywrap_tensorflow_internal.so': '$ORIGIN/../../tensorflow/compiler/xla/tsl/python/lib/core', 'tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_function_lib.so': '$ORIGIN/../../../../../python', 'tensorflow/compiler/mlir/quantization/tensorflow/python/pywrap_quantize_model.so': '$ORIGIN/../../../../../python', 'tensorflow/compiler/mlir/tensorflow_to_stablehlo/python/pywrap_tensorflow_to_stablehlo.so': '$ORIGIN/../../../../python', 'tensorflow/compiler/mlir/lite/python/_pywrap_converter_api.so': '$ORIGIN/../../../../python'}\n    for file, path in to_patch.items():\n        rpath = subprocess.check_output(['patchelf', '--print-rpath', '{}/{}'.format(srcs_dir, file)]).decode().strip()\n        new_rpath = rpath + ':' + path\n        subprocess.run(['patchelf', '--set-rpath', new_rpath, '{}/{}'.format(srcs_dir, file)], check=True)\n        subprocess.run(['patchelf', '--shrink-rpath', '{}/{}'.format(srcs_dir, file)], check=True)",
    "docstring": "Patch .so files. We must patch some of .so files otherwise auditwheel will fail. Args: srcs_dir: target directory with .so files to patch.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py",
    "ast_data": "FunctionDef name:patch_so arg:srcs_dir arguments arg Assign For Call Assign Call Call Call Call Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "num_work_units_completed",
    "source_code": "def num_work_units_completed(self, name=None):\n    if self._reader_ref.dtype == dtypes.resource:\n        return gen_io_ops.reader_num_work_units_completed_v2(self._reader_ref, name=name)\n    else:\n        return gen_io_ops.reader_num_work_units_completed(self._reader_ref, name=name)",
    "docstring": "Returns the number of work units this reader has finished processing. Args: name: A name for the operation (optional). Returns: An int64 Tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py",
    "ast_data": "FunctionDef name:num_work_units_completed arg:self arg:name arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    xp, _ = get_namespace(X)\n    scores = self.decision_function(X)\n    if len(scores.shape) == 1:\n        indices = xp.astype(scores > 0, indexing_dtype(xp))\n    else:\n        indices = xp.argmax(scores, axis=1)\n    return xp.take(self.classes_, indices, axis=0)",
    "docstring": "Predict class labels for samples in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the predictions. Returns ------- y_pred : ndarray of shape (n_samples,) Vector containing the class labels for each sample.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign Call If Compare Call Assign Call Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_buckets",
    "source_code": "@property\ndef num_buckets(self):\n    return (len(self.boundaries) + 1) * self.source_column.shape[0]",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:num_buckets arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_wrap_result_adbc",
    "source_code": "def _wrap_result_adbc(df: DataFrame, *, index_col=None, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame:\n    if dtype:\n        df = df.astype(dtype)\n    df = _parse_date_columns(df, parse_dates)\n    if index_col is not None:\n        df = df.set_index(index_col)\n    return df",
    "docstring": "Wrap result set of a SQLAlchemy query in a DataFrame.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\sql.py",
    "ast_data": "FunctionDef name:_wrap_result_adbc arg:df arguments arg arg arg arg arg If Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "scatter_upon_const_tensor",
    "source_code": "@register_lowering_pattern(CallFunction(aten.scatter.value, CallFunction(aten.full, KeywordArg('shape'), KeywordArg('background_val'), dtype=KeywordArg('dtype')), KeywordArg('dim'), KeywordArg('selector'), KeywordArg('val')), extra_check=scatter_upon_const_tensor_extra_check)\ndef scatter_upon_const_tensor(match: Match, shape, background_val, dtype, dim, selector, val):\n    from torch._inductor import metrics\n    metrics.num_matches_for_scatter_upon_const_tensor += 1\n    selector_loader = selector.make_loader()\n\n    def inner_fn(idx):\n        selector_idx = list(idx)\n        selector_idx[dim] = 0\n        selector = selector_loader(selector_idx)\n        return ops.where(selector == ops.index_expr(idx[dim], torch.int64), ops.constant(val, dtype), ops.constant(background_val, dtype))\n    return ir.Pointwise.create(device=selector.get_device(), dtype=dtype, inner_fn=inner_fn, ranges=shape)",
    "docstring": "Match the pattern of full+scatter into a pointwise. TODO: Right now the scatter value must be a scalar. But we could support it when it is a tensor as well.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:scatter_upon_const_tensor arg:match arg:shape arg:background_val arg:dtype arg:dim arg:selector arg:val arguments arg arg arg arg arg arg arg Assign Call FunctionDef name:inner_fn arg:idx arguments arg Assign Call Assign Assign Call Return return:yes Call Compare Call Call Call Return return:yes Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scope_name",
    "source_code": "@property\ndef scope_name(self):\n    return self._thread_local_data.scope_name",
    "docstring": "Returns scope name for the current thread.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:scope_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_png",
    "source_code": "def _is_png(contents, name=None):\n    with ops.name_scope(name, 'is_png'):\n        substr = string_ops.substr(contents, 0, 3)\n        return math_ops.equal(substr, b'\\x89PN', name=name)",
    "docstring": "Convenience function to check if the 'contents' encodes a PNG image. Args: contents: 0-D . The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a PNG image. is_png is susceptible to false positives.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_is_png arg:contents arg:name arguments arg arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_conv_bn_add_extra_inputs_getter_left",
    "source_code": "def _conv_bn_add_extra_inputs_getter_left(add_pattern):\n    _, _bn_conv, extra_input = add_pattern\n    return [extra_input]",
    "docstring": "get inputs pattern for extra inputs, inputs for root node are assumed to be copied over from root node to the fused node",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\onednn.py",
    "ast_data": "FunctionDef name:_conv_bn_add_extra_inputs_getter_left arg:add_pattern arguments arg Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get_cursor",
    "source_code": "def get_cursor():\n    return Cursor(*pygame.mouse._get_cursor())",
    "docstring": "get_cursor() -> pygame.cursors.Cursor get the current mouse cursor",
    "type": "function",
    "file_path": "pygame\\src_py\\cursors.py",
    "ast_data": "FunctionDef name:get_cursor arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_batch_update_progbar",
    "source_code": "def _batch_update_progbar(self, batch, logs=None):\n    logs = logs or {}\n    self._maybe_init_progbar()\n    if self.use_steps:\n        self.seen = batch + 1\n    else:\n        logs = copy.copy(logs)\n        batch_size = logs.pop('size', 0)\n        num_steps = logs.pop('num_steps', 1)\n        logs.pop('batch', None)\n        add_seen = num_steps * batch_size\n        self.seen += add_seen\n    if self.verbose == 1:\n        logs = tf_utils.sync_to_numpy_or_python_type(logs)\n        self.progbar.update(self.seen, list(logs.items()), finalize=False)",
    "docstring": "Updates the progbar.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_batch_update_progbar arg:self arg:batch arg:logs arguments arg arg arg Assign BoolOp Call If Assign Assign Call Assign Call Assign Call Call Assign If Compare Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "freeze",
    "source_code": "def freeze(self):\n\n    def _constant_state():\n        return constant_op.constant(self._state_callback(), dtype=dtypes.string)\n    return trackable.NoRestoreSaveable(tensor=_constant_state, dtype=dtypes.string, name=self.name, device='cpu:0')",
    "docstring": "Create a frozen which saves the current state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py",
    "ast_data": "FunctionDef name:freeze arg:self arguments arg FunctionDef name:_constant_state arguments Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_native_function_declarations",
    "source_code": "def get_native_function_declarations(*, grouped_native_functions: Sequence[NativeFunction | NativeFunctionsGroup], backend_indices: dict[DispatchKey, BackendIndex], native_function_decl_gen: Callable[[NativeFunctionsGroup | NativeFunction, BackendIndex], list[str]]=dest.compute_native_function_declaration) -> list[str]:\n    ns_grouped_kernels = get_ns_grouped_kernels(grouped_native_functions=grouped_native_functions, backend_indices=backend_indices, native_function_decl_gen=native_function_decl_gen)\n    return get_native_function_declarations_from_ns_grouped_kernels(ns_grouped_kernels=ns_grouped_kernels)",
    "docstring": "Generate kernel declarations, in . :param grouped_native_functions: a sequence of or . :param backend_indices: kernel collections grouped by dispatch key. :param native_function_decl_gen: callable to generate kernel declaration for each . :return: a list of string, from the string with all declarations, grouped by namespaces, split by newline.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen.py",
    "ast_data": "FunctionDef name:get_native_function_declarations arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_mutation_aspect",
    "source_code": "def set_mutation_aspect(self, aspect):\n    self._mutation_aspect = aspect\n    self.stale = True",
    "docstring": "Set the aspect ratio of the bbox mutation. Parameters ---------- aspect : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_mutation_aspect arg:self arg:aspect arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_infer_type",
    "source_code": "def _infer_type(str_val, na_value, prev_type):\n    if str_val in ('', na_value):\n        return prev_type\n    type_list = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string]\n    type_functions = [_is_valid_int32, _is_valid_int64, lambda str_val: _is_valid_float(str_val, dtypes.float32), lambda str_val: _is_valid_float(str_val, dtypes.float64), lambda str_val: True]\n    for i in range(len(type_list)):\n        validation_fn = type_functions[i]\n        if validation_fn(str_val) and (prev_type is None or prev_type in type_list[:i + 1]):\n            return type_list[i]",
    "docstring": "Given a string, infers its tensor type. Infers the type of a value by picking the least 'permissive' type possible, while still allowing the previous type inference for this column to be valid. Args: str_val: String value to infer the type of. na_value: Additional string to recognize as a NA/NaN CSV value. prev_type: Type previously inferred based on values of this column that we've seen up till now. Returns: Inferred dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:_infer_type arg:str_val arg:na_value arg:prev_type arguments arg arg arg If Compare Return return:yes Assign Assign arguments arg Call arguments arg Call arguments arg For Call Call Assign If BoolOp Call BoolOp Compare Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None, **params):\n    _raise_for_params(params, self, 'fit')\n    self.fit_transform(X, y=y, **params)\n    return self",
    "docstring": "Fit all transformers using X. Parameters ---------- X : {array-like, dataframe} of shape (n_samples, n_features) Input data, of which specified subsets are used to fit the transformers. y : array-like of shape (n_samples,...), default=None Targets for supervised learning. **params : dict, default=None Parameters to be passed to the underlying transformers' ``. .. versionadded:: 1.4 Returns ------- self : ColumnTransformer This estimator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg arg Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_on_read_restore_ops",
    "source_code": "def get_on_read_restore_ops(var, tensor, aggregation):\n    if aggregation == vs.VariableAggregation.SUM:\n        strategy = var.distribute_strategy\n        tensor = math_ops.cast(tensor / strategy.num_replicas_in_sync, var.dtype)\n    return control_flow_ops.group(tuple((assign_on_device(v.device, v, tensor) for v in var.values)))",
    "docstring": "Return restore ops for ON_READ variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py",
    "ast_data": "FunctionDef name:get_on_read_restore_ops arg:var arg:tensor arg:aggregation arguments arg arg arg If Compare Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "reduce_prod_v1",
    "source_code": "@tf_export(v1=['math.reduce_prod', 'reduce_prod'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')\ndef reduce_prod_v1(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None):\n    axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_indices', reduction_indices)\n    keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n    return reduce_prod(input_tensor, axis, keepdims, name)",
    "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each of the entries in , which must be unique. If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[1., 2.], [3., 4.]]) >>> tf.math.reduce_prod(x) >>> tf.math.reduce_prod(x, 0) >>> tf.math.reduce_prod(x, 1) Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). reduction_indices: The old (deprecated) name for axis. keep_dims: Deprecated alias for . Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.prod @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_prod_v1 arg:input_tensor arg:axis arg:keepdims arg:name arg:reduction_indices arg:keep_dims arguments arg arg arg arg arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "generate_all_broadcasting_possibilities_no_padding",
    "source_code": "def generate_all_broadcasting_possibilities_no_padding(d1: list[DVar], d2: list[DVar], d11: list[DVar], d12: list[DVar]):\n    size = len(d1)\n    res2 = []\n    for i in range(size):\n        t1 = broadcast_dim(d1, d2, d11, d12, i)\n        t2 = broadcast_dim(d2, d1, d12, d11, i)\n        t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i)\n        res2.append(Disj([t1, t2, t3]))\n    return Conj(res2)",
    "docstring": "Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension. We look at all combinations for all dimensions in d1 and d2 Args: d1: input1 dimensions d2: input2 dimensions d11: broadcasted input1 dimensions d12: broadcasted input2 dimensions Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py",
    "ast_data": "FunctionDef name:generate_all_broadcasting_possibilities_no_padding arg:d1 arg:d2 arg:d11 arg:d12 arguments arg arg arg arg Assign Call Assign For Call Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "page_range",
    "source_code": "@property\ndef page_range(self):\n    return range(1, self.num_pages + 1)",
    "docstring": "Return a 1-based range of pages for iterating through within a template for loop.",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:page_range arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "remove",
    "source_code": "def remove(self):\n    if self._remove_method is not None:\n        self._remove_method(self)\n        self.stale_callback = None\n        _ax_flag = False\n        if hasattr(self, 'axes') and self.axes:\n            self.axes._mouseover_set.discard(self)\n            self.axes.stale = True\n            self.axes = None\n            _ax_flag = True\n        if (fig := self.get_figure(root=False)) is not None:\n            if not _ax_flag:\n                fig.stale = True\n            self._parent_figure = None\n    else:\n        raise NotImplementedError('cannot remove artist')",
    "docstring": "Remove the artist from the figure if possible. The effect will not be visible until the figure is redrawn, e.g., with . Call to update the Axes limits if desired. Note: will not see collections even if the collection was added to the Axes with *autolim* = True. Note: there is no support for removing the artist's legend entry.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:remove arg:self arguments arg If Compare Call Assign Assign If BoolOp Call Call Assign Assign Assign If Compare Call If Assign Assign Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, iterable):\n    config = get_config()\n    warning_filters = warnings.filters\n    iterable_with_config_and_warning_filters = ((_with_config_and_warning_filters(delayed_func, config, warning_filters), args, kwargs) for delayed_func, args, kwargs in iterable)\n    return super().__call__(iterable_with_config_and_warning_filters)",
    "docstring": "Dispatch the tasks and return the results. Parameters ---------- iterable : iterable Iterable containing tuples of (delayed_function, args, kwargs) that should be consumed. Returns ------- results : list List of results of the tasks.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\parallel.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:iterable arguments arg arg Assign Call Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_skip_common_stack_elements",
    "source_code": "def _skip_common_stack_elements(stacktrace, base_case):\n    for i, (trace, base) in enumerate(zip(stacktrace, base_case)):\n        if trace != base:\n            return stacktrace[i:]\n    return stacktrace[-1:]",
    "docstring": "Skips items that the target stacktrace shares with the base stacktrace.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:_skip_common_stack_elements arg:stacktrace arg:base_case arguments arg arg For Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "request_stop",
    "source_code": "def request_stop(self):\n    self._stop_requested = True",
    "docstring": "Sets stop requested field. Hooks can use this function to request stop of iterations. checks whether this is called or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:request_stop arg:self arguments arg Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_angle",
    "source_code": "def set_angle(self, angle):\n    self.angle = angle\n    self.stale = True",
    "docstring": "Set the rotation angle in degrees. The rotation is performed anti-clockwise around *xy*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_angle arg:self arg:angle arguments arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "GPUCompatibleFIFOQueue",
    "source_code": "class GPUCompatibleFIFOQueue(QueueBase):\n\n    def __init__(self, capacity, dtypes, shapes=None, names=None, shared_name=None, name='fifo_queue'):\n        dtypes = _as_type_list(dtypes)\n        shapes = _as_shape_list(shapes, dtypes)\n        names = _as_name_list(names, dtypes)\n        with ops.init_scope():\n            queue_ref = gen_data_flow_ops.fifo_queue_v2(component_types=dtypes, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n        super(GPUCompatibleFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)\n\n    def enqueue_many(self, vals, name=None):\n        raise NotImplementedError('GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, only enqueue and dequeue.')\n\n    def dequeue_many(self, n, name=None):\n        raise NotImplementedError('GPUCompatibleFIFOQueue does not support enqueue_many or dequeue_many, only enqueue and dequeue.')",
    "docstring": "A queue implementation that dequeues elements in first-in first-out order. GPUCompatibleFIFOQueue is like FIFOQueue, but the queue resource may be placed either on a CPU or on a GPU. It is not cross-device: enqueues and dequeues will be colocated with the queue resource. GPUCompatibleFIFOQueue only supports enqueue and dequeue at the moment, not enqueue_many or dequeue_many. See for a description of the methods in this class.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "ClassDef name:GPUCompatibleFIFOQueue FunctionDef name:__init__ arg:self arg:capacity arg:dtypes arg:shapes arg:names arg:shared_name arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call With Call Assign Call Call Call Call FunctionDef name:enqueue_many arg:self arg:vals arg:name arguments arg arg arg Raise Call FunctionDef name:dequeue_many arg:self arg:n arg:name arguments arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_convert_equalization_ref",
    "source_code": "def _convert_equalization_ref(model: GraphModule):\n    modules = dict(model.named_modules(remove_duplicate=False))\n    weight_eq_obs_dict = update_obs_for_equalization(model, modules)\n    convert_eq_obs(model, modules, weight_eq_obs_dict)\n    return GraphModule(model, model.graph)",
    "docstring": "Reference function which applies changes needed for equalization, but does not quantize the nodes",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:_convert_equalization_ref arg:model arguments arg Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "fix_node_def",
    "source_code": "def fix_node_def(node_def, functions, shared_name_suffix):\n    if node_def.op in functions:\n        node_def.op = functions[node_def.op].name\n    for _, attr_value in node_def.attr.items():\n        if attr_value.WhichOneof('value') == 'func':\n            attr_value.func.name = functions[attr_value.func.name].name\n        elif attr_value.WhichOneof('value') == 'list':\n            for fn in attr_value.list.func:\n                fn.name = functions[fn.name].name\n    if node_def.op == 'HashTableV2':\n        if 'use_node_name_sharing' not in node_def.attr or not node_def.attr['use_node_name_sharing'].b:\n            node_def.attr['use_node_name_sharing'].b = True\n            shared_name_suffix += '_{}'.format(ops.uid())\n    op_def = op_def_registry.get(node_def.op)\n    if op_def:\n        attr = next((a for a in op_def.attr if a.name == 'shared_name'), None)\n        if attr:\n            shared_name = None\n            if 'shared_name' in node_def.attr and node_def.attr['shared_name'].s:\n                shared_name = node_def.attr['shared_name'].s\n            elif attr.default_value.s:\n                shared_name = compat.as_bytes(attr.default_value.s)\n            if not shared_name:\n                shared_name = compat.as_bytes(node_def.name)\n            node_def.attr['shared_name'].s = shared_name + compat.as_bytes(shared_name_suffix)",
    "docstring": "Replace functions calls and shared names in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\function_deserialization.py",
    "ast_data": "FunctionDef name:fix_node_def arg:node_def arg:functions arg:shared_name_suffix arguments arg arg arg If Compare Assign For Call If Compare Call Assign If Compare Call For Assign If Compare If BoolOp Compare Assign Call Call Assign Call If Assign Call Compare If Assign If BoolOp Compare Assign If Assign Call If Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "_verbose_printer",
    "source_code": "def _verbose_printer(verbose: bool | None) -> Callable[..., None]:\n    if verbose is False:\n        return lambda *_, **__: None\n    return lambda *args, **kwargs: print('[torch.onnx]', *args, **kwargs)",
    "docstring": "Prints messages based on .",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_core.py",
    "ast_data": "FunctionDef name:_verbose_printer arg:verbose arguments arg If Compare Return return:yes arguments arg arg Return return:yes arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "book",
    "source_code": "@property\ndef book(self) -> _WorkbookT:\n    raise NotImplementedError",
    "docstring": "Book instance. Class type will depend on the engine used. This attribute can be used to access engine-specific features.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_base.py",
    "ast_data": "FunctionDef name:book arg:self arguments arg Raise"
  },
  {
    "library": "scikit-learn",
    "name": "_iter",
    "source_code": "def _iter(self, with_final=True, filter_passthrough=True):\n    stop = len(self.steps)\n    if not with_final:\n        stop -= 1\n    for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)):\n        if not filter_passthrough:\n            yield (idx, name, trans)\n        elif trans is not None and trans != 'passthrough':\n            yield (idx, name, trans)",
    "docstring": "Generate (idx, (name, trans)) tuples from self.steps When filter_passthrough is True, 'passthrough' and None transformers are filtered out.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_iter arg:self arg:with_final arg:filter_passthrough arguments arg arg arg Assign Call If For Call Call If If BoolOp Compare Compare"
  },
  {
    "library": "tensorflow",
    "name": "_export_to_saved_model_graph",
    "source_code": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    _, _, _ = (object_map, tensor_map, options)\n    del kwargs\n    return []",
    "docstring": "Creates a copy of this object's tensors onto SavedModel graph. Needs to be overridden if the class contains tensors that must be saved into the graph. This method should update the and dictionaries. This method is called on all nodes in the Trackable Graph (generated by ). The nodes are traversed in the order defined by All usages of _map_resources should be migrated to this method. Args: object_map: A dictionary that maps original Trackables to the copied Trackables. This only needs to be updated if the object is a tf.function, or if the copied tensors are necessary for checkpointing this object. tensor_map: Dictionary mapping original tensors to copied tensors. options: A object. **kwargs: Additional kwargs that may be added at a later time. Returns: Flat list of original tensors that have been copied.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign Return return:no"
  },
  {
    "library": "pytorch",
    "name": "summarize_graph_break",
    "source_code": "def summarize_graph_break(filename):\n    log_file = f'{filename.rstrip('.csv')}_graph_breaks.csv'\n    if os.path.exists(log_file):\n        df = pd.read_csv(log_file)\n        df = df.sort_values('reason').drop_duplicates(subset='reason')\n        multi_tensor_sgd_row = df.loc[df['reason'].str.contains('_multi_tensor_sgd')]\n        if len(multi_tensor_sgd_row):\n            df = df[~df['reason'].str.contains('_multi_tensor_sgd')]\n            df = pd.concat([df, pd.DataFrame([multi_tensor_sgd_row.iloc[0]])], axis=0)\n        df.to_csv(f'{log_file.rstrip('.csv')}_deduped.csv', index=False)",
    "docstring": "Sorts and de-dupes the graphs breaks on the reason string. Note that this function is just a best effort to reduce the logging information. We could miss some graph breaks because of de-duping. We can further refine this function as need arises.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\common.py",
    "ast_data": "FunctionDef name:summarize_graph_break arg:filename arguments arg Assign Call If Call Assign Call Assign Call Call Assign Call If Call Assign Call Assign Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_rewrite_assign",
    "source_code": "def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:\n    toknum, tokval = tok\n    return (toknum, '==' if tokval == '=' else tokval)",
    "docstring": "Rewrite the assignment operator for PyTables expressions that use ``. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- tuple of int, str Either the input or token or the replacement values",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_rewrite_assign arg:tok arguments arg Assign Return return:yes Compare"
  },
  {
    "library": "django",
    "name": "_check_fix_default_value",
    "source_code": "def _check_fix_default_value(self):\n    if not self.has_default():\n        return []\n    value = self.default\n    if isinstance(value, datetime.datetime):\n        value = _to_naive(value).date()\n    elif isinstance(value, datetime.date):\n        pass\n    else:\n        return []\n    return self._check_if_value_fixed(value)",
    "docstring": "Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:_check_fix_default_value arg:self arguments arg If Call Return return:no Assign If Call Assign Call Call If Call Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "pack_output",
    "source_code": "def pack_output(self, flat_values: Sequence[core.Tensor]) -> Any:\n    if flat_values is None:\n        flat_values = []\n    if self.output is None:\n        raise ValueError('Can not pack outputs for undefined output type.')\n    else:\n        return self.output.from_tensors(iter(flat_values))",
    "docstring": "Packs flat tensors to generate a value of the output type.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:pack_output arg:self arg:flat_values arguments arg arg If Compare Assign If Compare Raise Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_ReadGrad",
    "source_code": "@ops.RegisterGradient('ReadVariableOp')\ndef _ReadGrad(_, grad):\n    return grad",
    "docstring": "Gradient for read op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_ReadGrad arg:_ arg:grad arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_xla_devices",
    "source_code": "@deprecated(None, 'XLA:CPU and XLA:GPU devices are deprecated', warn_once=True)\ndef enable_xla_devices(self):\n    pywrap_tfe.TF_EnableXlaDevices()",
    "docstring": "Enables XLA:CPU and XLA:GPU devices registration.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:enable_xla_devices arg:self arguments arg Call Call"
  },
  {
    "library": "authlib",
    "name": "generate_key",
    "source_code": "@classmethod\ndef generate_key(cls, key_size=256, options=None, is_private=True):\n    if not is_private:\n        raise ValueError('oct key can not be generated as public')\n    if key_size % 8 != 0:\n        raise ValueError('Invalid bit size for oct key')\n    return cls.import_key(secrets.token_bytes(int(key_size / 8)), options)",
    "docstring": "Generate a `` with the given bit size.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7518\\oct_key.py",
    "ast_data": "FunctionDef name:generate_key arg:cls arg:key_size arg:options arg:is_private arguments arg arg arg arg If Raise Call If Compare Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "reset_cache",
    "source_code": "@classmethod\ndef reset_cache(cls) -> None:\n    cls._cache_dtypes = {}",
    "docstring": "clear the cache",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:reset_cache arg:cls arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "output",
    "source_code": "def output(self, *args: T) -> None:\n    raise NotImplementedError",
    "docstring": "This is a fake op used in analysis but not codegen",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "FunctionDef name:output arg:self arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "__enter__",
    "source_code": "def __enter__(self):\n    if not self.enabled:\n        return\n    if self.entered:\n        raise RuntimeError('autograd profiler traces are not reentrant')\n    self.entered = True\n    profiler_kind = torch.autograd.ProfilerState.CUDA if self.use_cuda else torch.autograd.ProfilerState.CPU\n    profiler_config = torch.autograd.ProfilerConfig(profiler_kind, self.record_shapes, self.profile_memory, False, False, False, torch.profiler._ExperimentalConfig())\n    _enable_server_process_global_profiler(profiler_config)\n    return self",
    "docstring": "Turn on server-side process-global profiling. This enables thread-local profiler on all RPC threads running server-side request callbacks.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\rpc\\server_process_global_profiler.py",
    "ast_data": "FunctionDef name:__enter__ arg:self arguments arg If Return return:no If Raise Call Assign Assign Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assert_self_adjoint",
    "source_code": "def assert_self_adjoint(self, name='assert_self_adjoint'):\n    with self._name_scope(name):\n        return self._assert_self_adjoint()",
    "docstring": "Returns an that asserts this operator is self-adjoint. Here we check that this operator is *exactly* equal to its hermitian transpose. Args: name: A string name to prepend to created ops. Returns: An , that, when run, will raise an if the operator is not self-adjoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py",
    "ast_data": "FunctionDef name:assert_self_adjoint arg:self arg:name arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "EmptyMatchError",
    "source_code": "class EmptyMatchError(Exception):\n    pass",
    "docstring": "This is an exception that is thrown when a mock or extern is marked as ``, and is not matched with any module during packaging.",
    "type": "class",
    "file_path": "pytorch\\torch\\package\\package_exporter.py",
    "ast_data": "ClassDef name:EmptyMatchError"
  },
  {
    "library": "authlib",
    "name": "as_dict",
    "source_code": "def as_dict(self, is_private=False, **params):\n    tokens = self.tokens\n    if is_private and 'd' not in tokens:\n        raise ValueError('This is a public key')\n    kid = tokens.get('kid')\n    if 'd' in tokens and (not is_private):\n        tokens = {k: tokens[k] for k in tokens if k in self.PUBLIC_KEY_FIELDS}\n        tokens['kty'] = self.kty\n        if kid:\n            tokens['kid'] = kid\n    if not kid:\n        tokens['kid'] = self.thumbprint()\n    tokens.update(params)\n    return tokens",
    "docstring": "Represent this key as a dict of the JSON Web Key.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\asymmetric_key.py",
    "ast_data": "FunctionDef name:as_dict arg:self arg:is_private arguments arg arg arg Assign If BoolOp Compare Raise Call Assign Call If BoolOp Compare Assign Compare Assign If Assign If Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "TimerWx",
    "source_code": "class TimerWx(TimerBase):\n\n    def __init__(self, *args, **kwargs):\n        self._timer = wx.Timer()\n        self._timer.Notify = self._on_timer\n        super().__init__(*args, **kwargs)\n\n    def _timer_start(self):\n        self._timer.Start(self._interval, self._single)\n\n    def _timer_stop(self):\n        self._timer.Stop()\n\n    def _timer_set_interval(self):\n        if self._timer.IsRunning():\n            self._timer_start()",
    "docstring": "Subclass of using wx.Timer events.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "ClassDef name:TimerWx FunctionDef name:__init__ arg:self arguments arg arg arg Assign Call Assign Call Call FunctionDef name:_timer_start arg:self arguments arg Call FunctionDef name:_timer_stop arg:self arguments arg Call FunctionDef name:_timer_set_interval arg:self arguments arg If Call Call"
  },
  {
    "library": "pytorch",
    "name": "tree_map_with_path",
    "source_code": "def tree_map_with_path(func: Callable[..., Any], tree: PyTree, *rests: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> PyTree:\n    raise NotImplementedError('KeyPaths are not yet supported in cxx_pytree.')",
    "docstring": "Like :func:, but the provided callable takes an additional key path argument. Args: func: A function that takes `True`.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_map_with_path arg:func arg:tree arguments arg arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_params",
    "source_code": "def set_params(self, base=None, subs=None, *, numticks=None):\n    if base is not None:\n        self._base = float(base)\n    if subs is not None:\n        self._set_subs(subs)\n    if numticks is not None:\n        self.numticks = numticks",
    "docstring": "Set parameters within this locator.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:set_params arg:self arg:base arg:subs arguments arg arg arg arg If Compare Assign Call If Compare Call If Compare Assign"
  },
  {
    "library": "pytorch",
    "name": "apply",
    "source_code": "def apply(self, *model_args, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None=None, **model_kwargs) -> Sequence[int | float | bool | str | torch.Tensor | torch.dtype | None]:\n    args: Sequence[Any] = model_args\n    kwargs: Mapping[str, Any] = model_kwargs\n    for step in self._steps:\n        args, kwargs = step.apply(args, kwargs, model=model)\n    assert not kwargs\n    return args",
    "docstring": "Converts the PyTorch model inputs to exported ONNX model inputs format. Args: model_args: The PyTorch model inputs. model: The PyTorch model. model_kwargs: The PyTorch model keyword inputs. Returns: A sequence of tensors converted from PyTorch model inputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py",
    "ast_data": "FunctionDef name:apply arg:self arguments arg arg arg arg For Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "has_inf_or_nan",
    "source_code": "def has_inf_or_nan(datum, tensor):\n    _ = datum\n    if isinstance(tensor, InconvertibleTensorProto):\n        return False\n    elif np.issubdtype(tensor.dtype, np.floating) or np.issubdtype(tensor.dtype, np.complexfloating) or np.issubdtype(tensor.dtype, np.integer):\n        return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))\n    else:\n        return False",
    "docstring": "A predicate for whether a tensor consists of any bad numerical values. This predicate is common enough to merit definition in this module. Bad numerical values include s and s. The signature of this function follows the requirement of the method . Args: datum: () Datum metadata. tensor: ( or None) Value of the tensor. None represents an uninitialized tensor. Returns: () True if and only if tensor consists of any nan or inf values.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:has_inf_or_nan arg:datum arg:tensor arguments arg arg Assign If Call Return return:yes If BoolOp Call Call Call Return return:yes BoolOp Call Call Call Call Return return:yes"
  },
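The same predicate logic, restated as a self-contained NumPy function (minus the TensorFlow-specific `InconvertibleTensorProto` branch):

```python
import numpy as np

def has_bad_values(tensor) -> bool:
    arr = np.asarray(tensor)
    # Only numeric dtypes can carry nan/inf; everything else is vacuously clean.
    if not (np.issubdtype(arr.dtype, np.floating)
            or np.issubdtype(arr.dtype, np.complexfloating)
            or np.issubdtype(arr.dtype, np.integer)):
        return False
    return bool(np.any(np.isnan(arr)) or np.any(np.isinf(arr)))

print(has_bad_values([1.0, float("nan")]))  # True
print(has_bad_values(["a", "b"]))           # False
```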
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, input_nodes: list[ir.IRNode], layout: ir.Layout, num_threads: int, register_blocking: GemmBlocking, beta: int=1, alpha: int=1, has_bias: bool=False, epilogue_creator: Optional[Callable[[ir.Buffer], ir.Pointwise]]=None, act_mapping: Optional[dict[int, ir.IRNode]]=None, gemm_grouped_num: int=1) -> None:\n    super().__init__(input_nodes, layout, num_threads, register_blocking, beta, alpha, has_bias, epilogue_creator)\n    self.act_mapping = act_mapping\n    self.gemm_grouped_num = gemm_grouped_num\n    self.output_node: list[ir.Buffer] = [ir.Buffer(name='buf_out' + str(idx), layout=layout) for idx in range(gemm_grouped_num)]",
    "docstring": "Template for Group of GEMMs: * Each GEMM has the same dimensions (m, n, k) and the same leading dimensions (lda, ldb, ldc) for their A, B, and C matrices. * Each GEMM has distinct or shared activations, has distinct weight, has unique bias or no bias, has distinct epilogues. * In the current implementation, the outputs of all GEMMs are accumulated using pointwise epilogues. This behavior can be extended in the future if needed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_grouped_gemm_template.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_nodes arg:layout arg:num_threads arg:register_blocking arg:beta arg:alpha arg:has_bias arg:epilogue_creator arg:act_mapping arg:gemm_grouped_num arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_check_element_shape",
    "source_code": "def _check_element_shape(self, shape):\n    if not shape.is_compatible_with(self.element_shape):\n        raise ValueError('Inconsistent shapes: saw %s but expected %s ' % (shape, self.element_shape))\n    if self._infer_shape:\n        self._element_shape[0] = self.element_shape.merge_with(shape)",
    "docstring": "Changes the element shape of the array given a shape to merge with. Args: shape: A object to merge with. Raises: ValueError: if the provided shape is incompatible with the current element shape of the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:_check_element_shape arg:self arg:shape arguments arg arg If Call Raise Call If Assign Call"
  },
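The merge/compatibility behavior this relies on comes straight from `tf.TensorShape` and can be exercised on its own:

```python
import tensorflow as tf

partial = tf.TensorShape([None, 3])
seen = tf.TensorShape([2, 3])

print(partial.is_compatible_with(seen))  # True
print(partial.merge_with(seen))          # (2, 3): unknown dims are filled in

# An incompatible shape raises ValueError, which is what triggers the
# "Inconsistent shapes" error in _check_element_shape.
try:
    tf.TensorShape([4, 3]).merge_with(seen)
except ValueError as exc:
    print(exc)
```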
  {
    "library": "pytorch",
    "name": "is_running",
    "source_code": "def is_running(self):\n    return self._execution_trace_running",
    "docstring": "Returns True if the observer is running, otherwise False.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:is_running arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shape_as_list",
    "source_code": "def _shape_as_list(self) -> list[int]:\n    return list(self._shape_tuple())",
    "docstring": "The shape of the tensor as a list.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_shape_as_list arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    assert type(mod) == cls._FLOAT_MODULE, ' qat.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    assert mod.qconfig, 'Input float module must have a valid qconfig'\n    weight_qscheme = mod.qconfig.weight().qscheme\n    assert weight_qscheme == torch.per_channel_affine_float_qparams, 'Embedding weights requires a qscheme of torch.per_channel_affine_float_qparams Got ' + str(weight_qscheme)\n    qconfig = mod.qconfig\n    qat_embedding_bag = cls(mod.num_embeddings, mod.embedding_dim, mod.padding_idx, mod.max_norm, mod.norm_type, mod.scale_grad_by_freq, mod.sparse, mod.weight, qconfig=qconfig)\n    return qat_embedding_bag",
    "docstring": "Create a qat module from a float module Args: a float module, either produced by torch.ao.quantization utilities or directly from user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\embedding_ops.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call Assign Call Compare Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_named_tuple",
    "source_code": "def _is_named_tuple(instance):\n    if not isinstance(instance, tuple):\n        return False\n    return hasattr(instance, '_fields') and isinstance(instance._fields, collections_abc.Sequence) and all((isinstance(f, str) for f in instance._fields))",
    "docstring": "Returns True iff is a . Args: instance: An instance of a Python object. Returns: True if is a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:_is_named_tuple arg:instance arguments arg If Call Return return:yes Return return:yes BoolOp Call Call Call Call"
  },
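A standalone version of the same duck-typing check, runnable without TensorFlow:

```python
from collections import namedtuple
from collections.abc import Sequence

def is_named_tuple(instance) -> bool:
    # A namedtuple is a tuple subclass whose _fields is a sequence of strings.
    if not isinstance(instance, tuple):
        return False
    fields = getattr(instance, "_fields", None)
    return isinstance(fields, Sequence) and all(isinstance(f, str) for f in fields)

Point = namedtuple("Point", ["x", "y"])
print(is_named_tuple(Point(1, 2)))  # True
print(is_named_tuple((1, 2)))       # False
```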
  {
    "library": "tensorflow",
    "name": "stateless_random_uniform",
    "source_code": "@polymorphic_function.function\ndef stateless_random_uniform(shape, seed, layout):\n    return api.relayout(stateless_random_ops.stateless_random_uniform(shape=shape, seed=seed), layout=layout)",
    "docstring": "Creates uniform random tensor with the given layout.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\numpy_util.py",
    "ast_data": "FunctionDef name:stateless_random_uniform arg:shape arg:seed arg:layout arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "version_def",
    "source_code": "@property\ndef version_def(self) -> versions_pb2.VersionDef:\n    return versions_pb2.VersionDef(splitter_version=1, join_version=0, bad_consumers=version_lib.get_bad_versions())",
    "docstring": "Version info about the splitter and join implementation required.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py",
    "ast_data": "FunctionDef name:version_def arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "handle_raw_input",
    "source_code": "def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):\n    self.activated = content_length <= settings.FILE_UPLOAD_MAX_MEMORY_SIZE",
    "docstring": "Use the content_length to signal whether or not this handler should be used.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:handle_raw_input arg:self arg:input_data arg:META arg:content_length arg:boundary arg:encoding arguments arg arg arg arg arg arg Assign Compare"
  },
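A hedged sketch of how a custom handler could reuse this threshold idea; `SmallUploadsOnlyHandler` and `MAX_BYTES` are hypothetical, and a configured Django settings module is assumed.

```python
from django.conf import settings
from django.core.files.uploadhandler import MemoryFileUploadHandler

class SmallUploadsOnlyHandler(MemoryFileUploadHandler):
    MAX_BYTES = 1024 * 1024  # hypothetical stricter 1 MiB cap

    def handle_raw_input(self, input_data, META, content_length,
                         boundary, encoding=None):
        # Activate only for bodies under both the custom and the global limit.
        self.activated = content_length <= min(
            self.MAX_BYTES, settings.FILE_UPLOAD_MAX_MEMORY_SIZE
        )
```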
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, source_shape, target_shape, layer_broadcasters, dtype=None):\n    if not isinstance(source_shape, DynamicRaggedShape):\n        raise TypeError('source_shape is not a DynamicRaggedShape')\n    if not isinstance(target_shape, DynamicRaggedShape):\n        raise TypeError('target_shape is not a DynamicRaggedShape')\n    if not isinstance(layer_broadcasters, list):\n        raise TypeError('layer_broadcasters not a list: ' + str(layer_broadcasters))\n    for bc in layer_broadcasters:\n        if not isinstance(bc, _LayerBroadcaster):\n            raise TypeError('Not a LayerBroadcaster: ' + str(bc))\n    dtype = _find_dtype(source_shape, dtype)\n    dtype = _find_dtype(target_shape, dtype)\n    dtype = _find_dtype_iterable(layer_broadcasters, dtype)\n    dtype = _find_dtype(dtypes.int64, dtype)\n    self._source_shape = source_shape.with_dtype(dtype)\n    self._target_shape = target_shape.with_dtype(dtype)\n    self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters]",
    "docstring": "Create a broadcaster. Do not call directly. The source_shape, target_shape, and layer_broadcasters are converted to have the same dtype. Note: source_shape.rank and target_shape.rank must be known. Args: source_shape: the source DynamicRaggedShape target_shape: the target DynamicRaggedShape layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank. dtype: the preferred dtype of the broadcaster. Raises: TypeError: if the input types don't match.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:source_shape arg:target_shape arg:layer_broadcasters arg:dtype arguments arg arg arg arg arg If Call Raise Call If Call Raise Call If Call Raise Call Call For If Call Raise Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    os.popen('apache -k stop')\n    self.ready = False",
    "docstring": "Stop an Apache2/httpd server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "margin_ranking_loss",
    "source_code": "def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: float=0, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input1, input2, target):\n        return handle_torch_function(margin_ranking_loss, (input1, input2, target), input1, input2, target, margin=margin, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    if input1.dim() != input2.dim() or input1.dim() != target.dim():\n        raise RuntimeError(f'margin_ranking_loss : All input tensors should have same dimension but got sizes: input1: {input1.size()}, input2: {input2.size()}, target: {target.size()} ')\n    return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum)",
    "docstring": "Compute the margin ranking loss. See :class: for details. Args: input1 (Tensor): Predicted values. input2 (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. Returns: Tensor: Margin ranking loss.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:margin_ranking_loss arg:input1 arg:input2 arg:target arg:margin arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If BoolOp Compare Call Call Compare Call Call Raise Call Call Call Call Return return:yes Call"
  },
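A minimal usage example: target values of +1 mean `input1` should rank above `input2`, -1 the opposite.

```python
import torch
import torch.nn.functional as F

input1 = torch.randn(8, requires_grad=True)
input2 = torch.randn(8, requires_grad=True)
target = torch.ones(8)  # input1 should score higher than input2

loss = F.margin_ranking_loss(input1, input2, target, margin=0.5, reduction="mean")
loss.backward()
print(loss.item())
```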
  {
    "library": "sphinx",
    "name": "get_js_stemmer_code",
    "source_code": "def get_js_stemmer_code(self) -> str:\n    if not self.lang.js_stemmer_rawcode:\n        return self.lang.js_stemmer_code\n    base_js_path = _MINIFIED_JS_PATH / 'base-stemmer.js'\n    language_js_path = _MINIFIED_JS_PATH / self.lang.js_stemmer_rawcode\n    return '\\n'.join((base_js_path.read_text(encoding='utf-8'), language_js_path.read_text(encoding='utf-8'), f'window.Stemmer = {self.lang.language_name}Stemmer;'))",
    "docstring": "Returns JS code that will be inserted into language_data.js.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:get_js_stemmer_code arg:self arguments arg If Return return:yes Assign Assign Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_route_params",
    "source_code": "def _route_params(self, *, params, method, parent, caller):\n    res = Bunch()\n    if self._self_request:\n        res.update(self._self_request._route_params(params=params, method=method, parent=parent, caller=caller))\n    param_names = self._get_param_names(method=method, return_alias=True, ignore_self_request=True)\n    child_params = {key: value for key, value in params.items() if key in param_names}\n    for key in set(res.keys()).intersection(child_params.keys()):\n        if child_params[key] is not res[key]:\n            raise ValueError(f'In {self.owner}, there is a conflict on {key} between what is requested for this estimator and what is requested by its children. You can resolve this conflict by using an alias for the child estimator(s) requested metadata.')\n    res.update(child_params)\n    return res",
    "docstring": "Prepare the given parameters to be passed to the method. This is used when a router is used as a child object of another router. The parent router then passes all parameters understood by the child object to it and delegates their validation to the child. The output of this method can be used directly as the input to the corresponding method as extra props. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class: of {prop: value} which can be given to the corresponding method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_route_params arg:self arguments arg arg arg arg arg Assign Call If Call Call Assign Call Assign Call Compare For Call Call Call Call If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_supertype",
    "source_code": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['TensorShape']:\n    if any((not isinstance(other, TensorShape) for other in others)):\n        return None\n    if self.rank is None:\n        return unknown_shape()\n    if any((other.dims is None or self.rank != other.rank for other in others)):\n        return unknown_shape()\n    dims = [dim if all((dim == other._dims[i] for other in others)) else None for i, dim in enumerate(self._dims)]\n    return TensorShape(dims)",
    "docstring": "Returns the most specific supertype of self and others. * is the most specific supertyping both and . Note that is also a supertype but it is not \"most specific\". * is the most specific supertyping both and ). There are other less specific TensorShapes that supertype above mentioned TensorShapes, e.g. , . * is the most specific supertyping both and . As always, is also a supertype but not the most specific one. * ) is the only supertyping both and . In general, any two shapes that have different ranks will only have as a common supertype. * is the only supertyping both and . In general, the common supertype of any shape with is . Args: others: Sequence of . Returns: A which is the most specific supertype shape of and . None if it does not exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:most_specific_common_supertype arg:self arg:others arguments arg arg If Call Call Return return:no If Compare Return return:yes Call If Call BoolOp Compare Compare Return return:yes Call Assign Call Compare Call Return return:yes Call"
  },
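Assuming the TraceType method is reachable on `tf.TensorShape` (it is defined on the class in `tensor_shape.py`), the rules above can be checked directly:

```python
import tensorflow as tf

a = tf.TensorShape([2, 1])
b = tf.TensorShape([5, 1])

# Dimensions that disagree relax to None; matching dimensions are kept.
print(a.most_specific_common_supertype([b]))  # (None, 1)

# Shapes of different rank only share the fully unknown shape.
print(a.most_specific_common_supertype([tf.TensorShape([2])]))  # <unknown>
```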
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self, attributes=['_label_binarizer'])\n    if self._label_binarizer.y_type_.startswith('multilabel'):\n        scores = 2 * (self.decision_function(X) > 0) - 1\n        return self._label_binarizer.inverse_transform(scores)\n    return super().predict(X)",
    "docstring": "Predict class labels for samples in . Parameters ---------- X : {array-like, spare matrix} of shape (n_samples, n_features) The data matrix for which we want to predict the targets. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) Vector or matrix containing the predictions. In binary and multiclass problems, this is a vector containing . In a multilabel problem, it returns a matrix of shape .",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call If Call Assign Compare Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "load",
    "source_code": "@classmethod\ndef load(self, input):\n    is_file = isinstance(input, text_or_bytes) or hasattr(input, 'read')\n    return Parser().dict_from_file(input) if is_file else input.copy()",
    "docstring": "Resolve 'input' to dict from a dict, file, or filename.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:load arg:self arg:input arguments arg arg Assign BoolOp Call Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ScaledTranslation",
    "source_code": "class ScaledTranslation(Affine2DBase):\n\n    def __init__(self, xt, yt, scale_trans, **kwargs):\n        super().__init__(**kwargs)\n        self._t = (xt, yt)\n        self._scale_trans = scale_trans\n        self.set_children(scale_trans)\n        self._mtx = None\n        self._inverted = None\n    __str__ = _make_str_method('_t')\n\n    def get_matrix(self):\n        if self._invalid:\n            self._mtx = IdentityTransform._mtx.copy()\n            self._mtx[:2, 2] = self._scale_trans.transform(self._t)\n            self._invalid = 0\n            self._inverted = None\n        return self._mtx",
    "docstring": "A transformation that translates by *xt* and *yt*, after *xt* and *yt* have been transformed by *scale_trans*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:ScaledTranslation FunctionDef name:__init__ arg:self arg:xt arg:yt arg:scale_trans arguments arg arg arg arg arg Call Call Assign Assign Call Assign Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "unit",
    "source_code": "@staticmethod\ndef unit():\n    return Bbox([[0, 0], [1, 1]])",
    "docstring": "Create a new unit from (0, 0) to (1, 1).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:unit arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_pyval_find_struct_keys_and_depth",
    "source_code": "def _pyval_find_struct_keys_and_depth(pyval, keys):\n    if isinstance(pyval, dict):\n        keys.update(pyval.keys())\n        return 0\n    elif isinstance(pyval, (list, tuple)):\n        depth = None\n        for child in pyval:\n            child_depth = _pyval_find_struct_keys_and_depth(child, keys)\n            if child_depth is not None:\n                if depth is None:\n                    depth = child_depth + 1\n                elif depth != child_depth + 1:\n                    raise ValueError('Inconsistent depth of dictionaries')\n        return depth\n    else:\n        return None",
    "docstring": "Finds the keys & depth of nested dictionaries in . Args: pyval: A nested structure of lists, tuples, and dictionaries. keys: (output parameter) A set, which will be updated with any keys that are found in the nested dictionaries. Returns: The nesting depth of dictionaries in , or if does not contain any dictionaries. Raises: ValueError: If dictionaries have inconsistent depth.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_pyval_find_struct_keys_and_depth arg:pyval arg:keys arguments arg arg If Call Call Call Return return:yes If Call Assign For Assign Call If Compare If Compare Assign If Compare Raise Call Return return:yes Return return:no"
  },
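The function is pure Python, so its contract is easy to exercise with a standalone copy of the recursion:

```python
def find_struct_keys_and_depth(pyval, keys):
    # Dicts sit at depth 0; each enclosing list/tuple adds one level, and
    # sibling branches must agree on that level.
    if isinstance(pyval, dict):
        keys.update(pyval.keys())
        return 0
    if isinstance(pyval, (list, tuple)):
        depth = None
        for child in pyval:
            child_depth = find_struct_keys_and_depth(child, keys)
            if child_depth is not None:
                if depth is None:
                    depth = child_depth + 1
                elif depth != child_depth + 1:
                    raise ValueError("Inconsistent depth of dictionaries")
        return depth
    return None

keys = set()
print(find_struct_keys_and_depth([[{"a": 1}], [{"b": 2}]], keys), sorted(keys))
# -> 2 ['a', 'b']
```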
  {
    "library": "django",
    "name": "get_constraints",
    "source_code": "def get_constraints(self, cursor, table_name):\n    raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')",
    "docstring": "Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. Return a dict mapping constraint names to their attributes, where attributes is a dict with keys: * columns: List of columns this covers * primary_key: True if primary key, False otherwise * unique: True if this is a unique constraint, False otherwise * foreign_key: (table, column) of target, or None * check: True if check constraint, False otherwise * index: True if index, False otherwise. * orders: The order (ASC/DESC) defined for the columns of indexes * type: The type of the index (btree, hash, etc.) Some backends may return special constraint names that don't exist if they don't name constraints of a certain type (e.g. SQLite)",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\introspection.py",
    "ast_data": "FunctionDef name:get_constraints arg:self arg:cursor arg:table_name arguments arg arg arg Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self, *args, **kwargs):\n    if hasattr(request, 'app') and hasattr(request.app, 'log'):\n        log = request.app.log\n    else:\n        log = self\n    return log.error(*args, **kwargs)",
    "docstring": "Log the given message to the app.log or global log. Log the given message to the app.log or global log as appropriate.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\__init__.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg arg arg If BoolOp Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "remove",
    "source_code": "def remove(self):\n    pass",
    "docstring": "Nothing to remove.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py",
    "ast_data": "FunctionDef name:remove arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "LintMessage",
    "source_code": "@dc.dataclass\nclass LintMessage:\n    code: str\n    name: str\n    severity: LintSeverity\n    char: int | None = None\n    description: str | None = None\n    line: int | None = None\n    original: str | None = None\n    path: str | None = None\n    replacement: str | None = None\n    asdict = dc.asdict",
    "docstring": "This is a datatype representation of the JSON that gets sent to lintrunner as described here:",
    "type": "class",
    "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py",
    "ast_data": "ClassDef name:LintMessage Assign"
  },
  {
    "library": "cherrypy",
    "name": "encode",
    "source_code": "def encode(value):\n    for chunk in _encode(value):\n        yield chunk.encode('utf-8')",
    "docstring": "Encode to bytes.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_json.py",
    "ast_data": "FunctionDef name:encode arg:value arguments arg For Call Call"
  },
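`_encode` is internal to CherryPy; a self-contained equivalent streams chunks from `json.JSONEncoder.iterencode` instead:

```python
import json

def encode(value):
    # Yield UTF-8 encoded fragments of the JSON document, chunk by chunk.
    for chunk in json.JSONEncoder().iterencode(value):
        yield chunk.encode("utf-8")

print(b"".join(encode({"ok": True, "n": 3})))  # b'{"ok": true, "n": 3}'
```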
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X, copy=True):\n    check_is_fitted(self)\n    X = validate_data(self, X, copy=copy, dtype=FLOAT_DTYPES, reset=False)\n    X -= self._x_mean\n    y_pred = X @ self.coef_.T + self.intercept_\n    return y_pred.ravel() if self._predict_1d else y_pred",
    "docstring": "Predict targets of given samples. Parameters ---------- X : array-like of shape (n_samples, n_features) Samples. copy : bool, default=True Whether to copy or perform in-place normalization. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. Notes ----- This call requires the estimation of a matrix of shape , which may be an issue in high dimensional space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arg:copy arguments arg arg arg Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_extra_locals",
    "source_code": "def get_extra_locals(self):\n    raise NotImplementedError('subclasses must override this')",
    "docstring": "Returns extra static local variables to be made to transformed code. Subclasses must override this. Returns: extra_locals: A Dict[Text, Any] containing additional variables to make available to the transformed code.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:get_extra_locals arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "_ev1d",
    "source_code": "def _ev1d(self, j, n):\n    if self.boundary_conditions == 'dirichlet':\n        i = np.pi * (np.arange(n) + 1) / (n + 1)\n        ev = np.sqrt(2.0 / (n + 1.0)) * np.sin(i * (j + 1))\n    elif self.boundary_conditions == 'neumann':\n        i = np.pi * (np.arange(n) + 0.5) / n\n        ev = np.sqrt((1.0 if j == 0 else 2.0) / n) * np.cos(i * j)\n    elif j == 0:\n        ev = np.sqrt(1.0 / n) * np.ones(n)\n    elif j + 1 == n and n % 2 == 0:\n        ev = np.sqrt(1.0 / n) * np.tile([1, -1], n // 2)\n    else:\n        i = 2.0 * np.pi * (np.arange(n) + 0.5) / n\n        ev = np.sqrt(2.0 / n) * np.cos(i * np.floor((j + 1) / 2))\n    ev[np.abs(ev) < np.finfo(np.float64).eps] = 0.0\n    return ev",
    "docstring": "Return 1 eigenvector in 1d with index and number of grid points where ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:_ev1d arg:self arg:j arg:n arguments arg arg arg If Compare Assign Call Assign Call Call If Compare Assign Call Assign Call Compare Call If Compare Assign Call Call If BoolOp Compare Compare Assign Call Call Assign Call Assign Call Call Call Assign Compare Call Call Return return:yes"
  },
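For the Dirichlet branch, the returned vectors are the discrete sine modes, which form an orthonormal basis; a quick NumPy check under that assumption:

```python
import numpy as np

n = 6
vecs = np.array([
    np.sqrt(2.0 / (n + 1)) * np.sin(np.pi * (np.arange(n) + 1) * (j + 1) / (n + 1))
    for j in range(n)
])
# Rows should be orthonormal: V @ V.T == I (up to floating-point error).
print(np.allclose(vecs @ vecs.T, np.eye(n)))  # True
```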
  {
    "library": "pytorch",
    "name": "EMPTY_NN_MODULE_HOOKS_DICT",
    "source_code": "def EMPTY_NN_MODULE_HOOKS_DICT(self, guard):\n    if config.skip_nnmodule_hook_guards:\n        return\n    self.SEQUENCE_LENGTH(guard)",
    "docstring": "Special guard to skip guards on empty hooks. This is controlled by skip_nnmodule_hook_guards",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\guards.py",
    "ast_data": "FunctionDef name:EMPTY_NN_MODULE_HOOKS_DICT arg:self arg:guard arguments arg arg If Return return:no Call"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_common_subtype",
    "source_code": "def most_specific_common_subtype(self, others: Sequence['FunctionType']) -> Optional['FunctionType']:\n    subtyped_parameters = []\n    for i, parameter in enumerate(self.parameters.values()):\n        subtyped_parameter = parameter.most_specific_common_supertype([list(other.parameters.values())[i] for other in others])\n        if subtyped_parameter is None:\n            return None\n        subtyped_parameters.append(subtyped_parameter)\n    if not all(subtyped_parameters):\n        return None\n    capture_names = set(self.captures.keys())\n    for other in others:\n        capture_names = capture_names.union(other.captures.keys())\n    subtyped_captures = collections.OrderedDict()\n    for name in capture_names:\n        containing = [t for t in [self, *others] if name in t.captures]\n        base = containing[0]\n        relevant_others = containing[1:]\n        common_type = base.captures[name].most_specific_common_supertype([other.captures[name] for other in relevant_others])\n        if common_type is None:\n            return None\n        else:\n            subtyped_captures[name] = common_type\n    return FunctionType(subtyped_parameters, subtyped_captures)",
    "docstring": "Returns a common subtype (if exists).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:most_specific_common_subtype arg:self arg:others arguments arg arg Assign For Call Call Assign Call Call Call If Compare Return return:no Call If Call Return return:no Assign Call Call For Assign Call Call Assign Call For Assign Compare Assign Assign Assign Call If Compare Return return:no Assign Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "move_to_back",
    "source_code": "def move_to_back(self, sprite):\n    self.change_layer(sprite, self.get_bottom_layer() - 1)",
    "docstring": "move the sprite to the bottom layer LayeredUpdates.move_to_back(sprite): return None Moves the sprite to the bottom layer by moving it to a new layer below the current bottom layer.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:move_to_back arg:self arg:sprite arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_module_name",
    "source_code": "def set_module_name(self, module_name: str, qconfig: QConfigAny) -> QConfigMapping:\n    self.module_name_qconfigs[module_name] = qconfig\n    return self",
    "docstring": "Set the QConfig for modules matching the given module name. If the QConfig for an existing module name was already set, the new QConfig will override the old one.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\qconfig_mapping.py",
    "ast_data": "FunctionDef name:set_module_name arg:self arg:module_name arg:qconfig arguments arg arg arg Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rgb_to_lab",
    "source_code": "def rgb_to_lab(image: torch.Tensor) -> torch.Tensor:\n    if not isinstance(image, torch.Tensor):\n        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    lin_rgb = rgb_to_linear_rgb(image)\n    xyz_im: torch.Tensor = rgb_to_xyz(lin_rgb)\n    xyz_ref_white = torch.tensor([0.95047, 1.0, 1.08883], device=xyz_im.device, dtype=xyz_im.dtype)[..., :, None, None]\n    xyz_normalized = torch.div(xyz_im, xyz_ref_white)\n    threshold = 0.008856\n    power = torch.pow(xyz_normalized.clamp(min=threshold), 1 / 3.0)\n    scale = 7.787 * xyz_normalized + 4.0 / 29.0\n    xyz_int = torch.where(xyz_normalized > threshold, power, scale)\n    x: torch.Tensor = xyz_int[..., 0, :, :]\n    y: torch.Tensor = xyz_int[..., 1, :, :]\n    z: torch.Tensor = xyz_int[..., 2, :, :]\n    L: torch.Tensor = 116.0 * y - 16.0\n    a: torch.Tensor = 500.0 * (x - y)\n    _b: torch.Tensor = 200.0 * (y - z)\n    out: torch.Tensor = torch.stack([L, a, _b], dim=-3)\n    return out",
    "docstring": "Convert a RGB image to Lab. .. image:: _static/img/rgb_to_lab.png The input RGB image is assumed to be in the range of :math:. Lab color is computed using the D65 illuminant and Observer 2. Args: image: RGB Image to be converted to Lab with shape :math:. Returns: Lab version of the image with shape :math:. The L channel values are in the range 0..100. a and b are in the range -128..127. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_lab(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\lab.py",
    "ast_data": "FunctionDef name:rgb_to_lab arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Call Call Assign Call Assign Call Assign Assign Call Call Assign Assign Call Compare Call Return return:yes"
  },
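Round-trip usage, assuming kornia is installed (the companion `lab_to_rgb` lives in the same module):

```python
import torch
from kornia.color import lab_to_rgb, rgb_to_lab

img = torch.rand(2, 3, 4, 5)  # RGB in [0, 1]
lab = rgb_to_lab(img)
print(lab.shape)              # torch.Size([2, 3, 4, 5])
print(float(lab[:, 0].min()), float(lab[:, 0].max()))  # L channel within 0..100

back = lab_to_rgb(lab)
print(torch.allclose(img, back, atol=1e-3))  # approximately recovers the input
```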
  {
    "library": "tensorflow",
    "name": "forward_index",
    "source_code": "@property\ndef forward_index(self):\n    return self._forward_index",
    "docstring": "The loop index of forward loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:forward_index arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DispatchContext",
    "source_code": "class DispatchContext(collections.namedtuple('DispatchContext', ('options',))):\n\n    def option(self, name):\n        return self.options[name]",
    "docstring": "Allows passing additional parameters to the specific implementations. Attributes: options: Optional dict of extra arguments that may be required by specific implementations.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\dispatch_context.py",
    "ast_data": "ClassDef name:DispatchContext Call FunctionDef name:option arg:self arg:name arguments arg arg Return return:yes"
  },
  {
    "library": "django",
    "name": "ExtentField",
    "source_code": "class ExtentField(Field):\n    description = _('Extent Aggregate Field')\n\n    def get_internal_type(self):\n        return 'ExtentField'\n\n    def select_format(self, compiler, sql, params):\n        select = compiler.connection.ops.select_extent\n        return (select % sql if select else sql, params)",
    "docstring": "Used as a return value from an extent aggregate",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py",
    "ast_data": "ClassDef name:ExtentField Assign Call FunctionDef name:get_internal_type arg:self arguments arg Return return:yes FunctionDef name:select_format arg:self arg:compiler arg:sql arg:params arguments arg arg arg arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "devices",
    "source_code": "def devices(self):\n    return [cuda.Device(i) for i in range(cuda.runtime.getDeviceCount())]",
    "docstring": "The devices supported by CuPy. Returns ------- devices : list[Device] The devices supported by CuPy. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py",
    "ast_data": "FunctionDef name:devices arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_asarray_validated",
    "source_code": "def _asarray_validated(a, check_finite=True, sparse_ok=False, objects_ok=False, mask_ok=False, as_inexact=False):\n    if not sparse_ok:\n        if issparse(a):\n            msg = 'Sparse arrays/matrices are not supported by this function. Perhaps one of the `scipy.sparse.linalg` functions would work instead.'\n            raise ValueError(msg)\n    if not mask_ok:\n        if np.ma.isMaskedArray(a):\n            raise ValueError('masked arrays are not supported')\n    toarray = np.asarray_chkfinite if check_finite else np.asarray\n    a = toarray(a)\n    if not objects_ok:\n        if a.dtype is np.dtype('O'):\n            raise ValueError('object arrays are not supported')\n    if as_inexact:\n        if not np.issubdtype(a.dtype, np.inexact):\n            a = toarray(a, dtype=np.float64)\n    return a",
    "docstring": "Helper function for SciPy argument validation. Many SciPy linear algebra functions do support arbitrary array-like input arguments. Examples of commonly unsupported inputs include matrices containing inf/nan, sparse matrix representations, and matrices with complicated elements. Parameters ---------- a : array_like The array-like input. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True sparse_ok : bool, optional True if scipy sparse matrices are allowed. objects_ok : bool, optional True if arrays with dype('O') are allowed. mask_ok : bool, optional True if masked arrays are allowed. as_inexact : bool, optional True to convert the input array to a np.inexact dtype. Returns ------- ret : ndarray The converted validated array.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "FunctionDef name:_asarray_validated arg:a arg:check_finite arg:sparse_ok arg:objects_ok arg:mask_ok arg:as_inexact arguments arg arg arg arg arg arg If If Call Assign Raise Call If If Call Raise Call Assign Assign Call If If Compare Call Raise Call If If Call Assign Call Return return:yes"
  },
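The individual checks it composes are ordinary NumPy calls and can be tried in isolation:

```python
import numpy as np

# Finite check (check_finite=True path):
np.asarray_chkfinite([1.0, 2.0])  # fine
try:
    np.asarray_chkfinite([1.0, np.nan])
except ValueError as exc:
    print(exc)  # array must not contain infs or NaNs

# Mask check (mask_ok=False path):
masked = np.ma.masked_array([1, 2], mask=[0, 1])
print(np.ma.isMaskedArray(masked))  # True -> would be rejected

# Object-dtype check (objects_ok=False path):
print(np.asarray([{"a": 1}]).dtype is np.dtype("O"))  # True -> would be rejected
```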
  {
    "library": "tensorflow",
    "name": "_get_kwarg_as_str_attr",
    "source_code": "def _get_kwarg_as_str_attr(attr_name, value):\n    if isinstance(value, str):\n        return attr_value_pb2.AttrValue(s=compat.as_bytes(value))\n    else:\n        raise ValueError(f'Attribute {attr_name} must be str. Got {type(value)}.')",
    "docstring": "Creates an AttrValue for a python object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_get_kwarg_as_str_attr arg:attr_name arg:value arguments arg arg If Call Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_generate_hypercube",
    "source_code": "def _generate_hypercube(samples, dimensions, rng):\n    if dimensions > 30:\n        return np.hstack([rng.randint(2, size=(samples, dimensions - 30)), _generate_hypercube(samples, 30, rng)])\n    out = sample_without_replacement(2 ** dimensions, samples, random_state=rng).astype(dtype='>u4', copy=False)\n    out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]\n    return out",
    "docstring": "Returns distinct binary samples of length dimensions.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_samples_generator.py",
    "ast_data": "FunctionDef name:_generate_hypercube arg:samples arg:dimensions arg:rng arguments arg arg arg If Compare Return return:yes Call Call Call Assign Call Call Assign Call Call Call Return return:yes"
  },
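The bit-unpacking trick for `dimensions <= 30` can be demonstrated directly: draw distinct integer codes below `2**dimensions`, then expand each into its binary row (here via `np.random.RandomState.permutation` rather than `sample_without_replacement`):

```python
import numpy as np

rng = np.random.RandomState(0)
samples, dimensions = 4, 5

codes = rng.permutation(2 ** dimensions)[:samples].astype(">u4")
rows = np.unpackbits(codes.view(">u1")).reshape(-1, 32)[:, -dimensions:]
print(rows)                                      # 4 binary vectors of length 5
print(len({tuple(r) for r in rows}) == samples)  # True: all rows distinct
```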
  {
    "library": "tensorflow",
    "name": "watch_gradients_by_tensor_names",
    "source_code": "def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):\n    tensor_name_pattern = re.compile(tensor_name_regex)\n    with graph.as_default():\n        for op in graph.get_operations():\n            for output in op.outputs:\n                if tensor_name_pattern.match(output.name):\n                    debug_op = self.identify_gradient(output)\n                    for consumer in list(output.consumers()):\n                        if consumer == debug_op.op:\n                            continue\n                        for i, consumer_input in enumerate(consumer.inputs):\n                            if consumer_input == output:\n                                consumer._update_input(i, debug_op)\n    return self",
    "docstring": "Watch gradient tensors by name(s) of the x-tensor(s). The side effect of this method is that when gradient tensor(s) are created with respect to the x-tensors, the gradient tensor(s) will be registered with this instance and can later be retrieved. Unlike the method, this method is used after the construction of the forward graph has completed. Unlike the method, this method does not use handles to the tensors of interest; it uses their names. This method is the same as except that the x-tensors are specified by name patterns, instead of or objects. Example: Args: graph: the to watch the gradients on. tensor_name_regex: the regular-expression pattern of the name(s) of the x-tensor(s) to watch. x-tensor refers to the tensors on the denominator of the differentiation. Returns: The GradientsDebugger instance itself.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py",
    "ast_data": "FunctionDef name:watch_gradients_by_tensor_names arg:self arg:graph arg:tensor_name_regex arguments arg arg arg Assign Call With Call For Call For If Call Assign Call For Call Call If Compare For Call If Compare Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "sheet_names",
    "source_code": "@property\ndef sheet_names(self) -> list[str]:\n    from odf.table import Table\n    tables = self.book.getElementsByType(Table)\n    return [t.getAttribute('name') for t in tables]",
    "docstring": "Return a list of sheet names present in the document",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odfreader.py",
    "ast_data": "FunctionDef name:sheet_names arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_verts",
    "source_code": "def set_verts(self, verts, closed=True):\n    self._get_vector(verts)\n    super().set_verts([], False)\n    self._closed = closed",
    "docstring": "Set 3D vertices. Parameters ---------- verts : list of (N, 3) array-like The sequence of polygons [*verts0*, *verts1*, ...] where each element *verts_i* defines the vertices of polygon *i* as a 2D array-like of shape (N, 3). closed : bool, default: True Whether the polygon should be closed by adding a CLOSEPOLY connection at the end.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:set_verts arg:self arg:verts arg:closed arguments arg arg arg Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "num_subgraphs",
    "source_code": "def num_subgraphs(self):\n    return self._interpreter.NumSubgraphs()",
    "docstring": "Returns the number of subgraphs in the model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py",
    "ast_data": "FunctionDef name:num_subgraphs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "KeyTransformTextLookupMixin",
    "source_code": "class KeyTransformTextLookupMixin:\n\n    def __init__(self, key_transform, *args, **kwargs):\n        if not isinstance(key_transform, KeyTransform):\n            raise TypeError('Transform should be an instance of KeyTransform in order to use this lookup.')\n        key_text_transform = KeyTextTransform(key_transform.key_name, *key_transform.source_expressions, **key_transform.extra)\n        super().__init__(key_text_transform, *args, **kwargs)",
    "docstring": "Mixin for combining with a lookup expecting a text lhs from a JSONField key lookup. On PostgreSQL, make use of the ->> operator instead of casting key values to text and performing the lookup on the resulting representation.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\json.py",
    "ast_data": "ClassDef name:KeyTransformTextLookupMixin FunctionDef name:__init__ arg:self arg:key_transform arguments arg arg arg arg If Call Raise Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_initialize",
    "source_code": "def _initialize(self, args, kwds, add_initializers_to=None):\n    created_variables = []\n\n    def variable_capturing_scope(next_creator, **kwds):\n        enable_variable_lifting = kwds.get('experimental_enable_variable_lifting')\n        if enable_variable_lifting is None:\n            enable_variable_lifting = True\n        if not enable_variable_lifting:\n            return next_creator(**kwds)\n        v = UnliftedInitializerVariable(add_initializers_to=add_initializers_to, **kwds)\n        created_variables.append(weakref.ref(v))\n        return v\n    self._created_variables = created_variables\n    self._variable_creation_config = self._generate_scoped_tracing_options(variable_capturing_scope, tracing_compilation.ScopeType.VARIABLE_CREATION)\n    self._concrete_variable_creation_fn = tracing_compilation.trace_function(args, kwds, self._variable_creation_config)\n\n    def invalid_creator_scope(*unused_args, **unused_kwds):\n        raise ValueError('tf.function only supports singleton tf.Variables created on the first call. Make sure the tf.Variable is only created once or created outside tf.function. See https://www.tensorflow.org/guide/function#creating_tfvariables for more information.')\n    self._no_variable_creation_config = self._generate_scoped_tracing_options(invalid_creator_scope, tracing_compilation.ScopeType.NO_VARIABLE_CREATION)",
    "docstring": "Initializes, on the first call. Creates two s, one that will allow creation of variables and one that won't. Additionally runs a trace for the that allows creation of variables. Args: args: Arguments to the underlying python callable. kwds: Keyword arguments to the python callable. add_initializers_to: Where to collect variable initializers, if not None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_initialize arg:self arg:args arg:kwds arg:add_initializers_to arguments arg arg arg arg Assign FunctionDef name:variable_capturing_scope arg:next_creator arguments arg arg Assign Call If Compare Assign If Return return:yes Call Assign Call Call Call Return return:yes Assign Assign Call Assign Call FunctionDef name:invalid_creator_scope arguments arg arg Raise Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "_call_previousnext",
    "source_code": "def _call_previousnext(self, x_new):\n    x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)\n    x_new_indices = x_new_indices.clip(1 - self._ind, len(self.x) - self._ind).astype(intp)\n    y_new = self._y[x_new_indices + self._ind - 1]\n    return y_new",
    "docstring": "Use previous/next neighbor of x_new, y_new = f(x_new).",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_call_previousnext arg:self arg:x_new arguments arg arg Assign Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "str2bool",
    "source_code": "def str2bool(value):\n    value = value.upper()\n    if value == 'TRUE':\n        return True\n    elif value == 'FALSE':\n        return False\n    else:\n        raise ValueError('Invalid boolean')",
    "docstring": "Tries to transform a string supposed to represent a boolean to a boolean. Parameters ---------- value : str The string that is transformed to a boolean. Returns ------- boolval : bool The boolean representation of . Raises ------ ValueError If the string is not 'True' or 'False' (case independent) Examples -------- >>> import numpy as np >>> np.lib._iotools.str2bool('TRUE') True >>> np.lib._iotools.str2bool('false') False",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_iotools.py",
    "ast_data": "FunctionDef name:str2bool arg:value arguments arg Assign Call If Compare Return return:yes If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "SVTimerCheckpointThread",
    "source_code": "class SVTimerCheckpointThread(coordinator.LooperThread):\n\n    def __init__(self, sv, sess):\n        super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)\n        self._sv = sv\n        self._sess = sess\n\n    def run_loop(self):\n        logging.info('Saving checkpoint to path %s', self._sv.save_path)\n        self._sv.saver.save(self._sess, self._sv.save_path, global_step=self._sv.global_step)\n        if self._sv.summary_writer and self._sv.global_step is not None:\n            current_step = training_util.global_step(self._sess, self._sv.global_step)\n            self._sv.summary_writer.add_session_log(SessionLog(status=SessionLog.CHECKPOINT, checkpoint_path=self._sv.save_path), current_step)",
    "docstring": "A thread to checkpoint on a timer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "ClassDef name:SVTimerCheckpointThread FunctionDef name:__init__ arg:self arg:sv arg:sess arguments arg arg arg Call Call Assign Assign FunctionDef name:run_loop arg:self arguments arg Call Call If BoolOp Compare Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "graph_pool_handle",
    "source_code": "def graph_pool_handle():\n    return _graph_pool_handle()",
    "docstring": "Return an opaque token representing the id of a graph memory pool. See :ref:. .. warning:: This API is in beta and may change in future releases.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:graph_pool_handle arguments Return return:yes Call"
  },
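An illustrative sketch of sharing one pool across two captures (requires a CUDA device; in practice a warm-up pass on a side stream normally precedes capture):

```python
import torch

pool = torch.cuda.graph_pool_handle()   # opaque pool id
x = torch.zeros(8, device="cuda")

g1, g2 = torch.cuda.CUDAGraph(), torch.cuda.CUDAGraph()
with torch.cuda.graph(g1, pool=pool):   # both captures allocate from `pool`
    y = x + 1
with torch.cuda.graph(g2, pool=pool):
    z = y * 2

g1.replay()
g2.replay()
print(z)  # tensor of 2.0s
```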
  {
    "library": "tensorflow",
    "name": "disable_mixed_precision_graph_rewrite_v1",
    "source_code": "@deprecation.deprecated_endpoints('train.experimental.disable_mixed_precision_graph_rewrite')\n@tf_export(v1=['mixed_precision.disable_mixed_precision_graph_rewrite', 'train.experimental.disable_mixed_precision_graph_rewrite'])\ndef disable_mixed_precision_graph_rewrite_v1():\n    if not mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled():\n        tf_logging.warn('disable_mixed_precision_graph_rewrite() called when mixed precision is already disabled.')\n    config.set_optimizer_experimental_options({'auto_mixed_precision': False})\n    mixed_precision_global_state.set_mixed_precision_graph_rewrite_enabled(False)",
    "docstring": "Disables the mixed precision graph rewrite. After this is called, the mixed precision graph rewrite will no longer run for new Sessions, and so float32 operations will no longer be converted to float16 in such Sessions. However, any existing Sessions will continue to have the graph rewrite enabled if they were created after was called but before was called. This does not undo the effects of loss scaling. Any optimizers wrapped with a LossScaleOptimizer will continue to do loss scaling, although this loss scaling will no longer be useful if the optimizer is used in new Sessions, as the graph rewrite no longer converts the graph to use float16. This function is useful for unit testing. A unit tests can test using the mixed precision graph rewrite, then disable it so future unit tests continue using float32. If this is done, unit tests should not share a single session, as and have no effect on existing sessions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\mixed_precision.py",
    "ast_data": "FunctionDef name:disable_mixed_precision_graph_rewrite_v1 arguments If Call Call Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "reset",
    "source_code": "def reset(self) -> 'Sobol':\n    super().reset()\n    self._quasi = self._shift.copy()\n    return self",
    "docstring": "Reset the engine to base state. Returns ------- engine : Sobol Engine reset to its base state.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Call Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "id_for_label",
    "source_code": "@property\ndef id_for_label(self):\n    widget = self.field.widget\n    id_ = widget.attrs.get('id') or self.auto_id\n    return widget.id_for_label(id_)",
    "docstring": "Wrapper around the field widget's method. Useful, for example, for focusing on this field regardless of whether it has a single widget or a MultiWidget.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:id_for_label arg:self arguments arg Assign Assign BoolOp Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "detect",
    "source_code": "@torch.inference_mode()\ndef detect(self, images: Tensor, n: Optional[int]=10000, apply_imagenet_normalization: bool=True, pad_if_not_divisible: bool=True, crop_h: Optional[int]=None, crop_w: Optional[int]=None) -> Tuple[Tensor, Tensor]:\n    KORNIA_CHECK_SHAPE(images, ['B', '3', 'H', 'W'])\n    self.train(False)\n    B, C, H, W = images.shape\n    if pad_if_not_divisible:\n        h, w = images.shape[2:]\n        pd_h = 14 - h % 14 if h % 14 > 0 else 0\n        pd_w = 14 - w % 14 if w % 14 > 0 else 0\n        images = torch.nn.functional.pad(images, (0, pd_w, 0, pd_h), value=0.0)\n    if apply_imagenet_normalization:\n        images = self.normalizer(images)\n    logits = self.detector.forward(images)\n    logits = logits[..., :H, :W]\n    if crop_h is not None and crop_w is not None:\n        logits = logits[..., :crop_h, :crop_w]\n        H, W = (crop_h, crop_w)\n    scoremap = logits.reshape(B, H * W).softmax(dim=-1).reshape(B, H, W)\n    keypoints, confidence = sample_keypoints(scoremap, num_samples=n)\n    return (keypoints, confidence)",
    "docstring": "Detect keypoints in the input images. Args: images: A tensor of shape :math: containing the input images. n: The number of keypoints to detect. apply_imagenet_normalization: Whether to apply ImageNet normalization to the input images. pad_if_not_divisible: pad image shape if not evenly divisible. crop_h: The height of the crop to be used for detection. If None, the full image is used. crop_w: The width of the crop to be used for detection. If None, the full image is used. Returns: keypoints: A tensor of shape :math: containing the detected keypoints, normalized to the range :math:. scores: A tensor of shape :math: containing the scores of the detected keypoints.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\dedode\\dedode.py",
    "ast_data": "FunctionDef name:detect arg:self arg:images arg:n arg:apply_imagenet_normalization arg:pad_if_not_divisible arg:crop_h arg:crop_w arguments arg arg arg arg arg arg arg Call Call Assign If Assign Assign Compare Assign Compare Assign Call If Assign Call Assign Call Assign If BoolOp Compare Compare Assign Assign Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_skip_if_dtype",
    "source_code": "def _skip_if_dtype(arg):\n    if isinstance(arg, str):\n        return None\n    if type(arg) is type:\n        return None if issubclass(arg, np.generic) else arg\n    else:\n        return None if isinstance(arg, np.dtype) else arg",
    "docstring": "'array or dtype' polymorphism. Return None for np.int8, dtype('float32') or 'f' etc arg for np.empty(3) etc",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_ni_support.py",
    "ast_data": "FunctionDef name:_skip_if_dtype arg:arg arguments arg If Call Return return:no If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, closure: Optional[Callable[[], float]]=None) -> Optional[float]:\n    raise NotImplementedError",
    "docstring": "Perform a single optimization step to update parameter. Args: closure (Callable): A closure that reevaluates the model and returns the loss. Optional for most optimizers.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:step arg:self arg:closure arguments arg arg Raise"
  },
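`step` here is the abstract base-class hook; below is a minimal sketch of the closure protocol it defines, using `torch.optim.SGD` as a concrete subclass (the model and data are made up).

```python
import torch

model = torch.nn.Linear(2, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
x, y = torch.randn(8, 2), torch.randn(8, 1)

def closure():
    # re-evaluate the model and return the loss, as the docstring requires
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

loss = opt.step(closure)  # the optimizer may call the closure internally
```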
  {
    "library": "tensorflow",
    "name": "get_compiler_ir",
    "source_code": "def get_compiler_ir(self, device_name, platform_name, function_name, flat_args, captured_inputs, stage='hlo'):\n    return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name, stage, device_name, flat_args, captured_inputs, platform_name)",
    "docstring": "Get the compiler IR bytes. Args: device_name: The name of the device with the form as \"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc. When this is used, actual device is needed for getting the compiler IR. platform_name: The name of the platform, e.g. \"TPU\". When this is used, first we find a device whose name contains the platform, if it is found we get the compiler IR by device. Otherwise the compiler IR is obtained as if using that device. The former logic of falling back to device is necessary, as there are cases of TF variables that need to access devices, but the upper layer may generally choose platform for getting compiler IR in a device-agnostic way. function_name: The name of the function to get the compiler IR. flat_args: The flat argument inputs. captured_inputs: The inputs that are captured. stage: The exported stage for the given function. Returns: The compiler IR bytes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:get_compiler_ir arg:self arg:device_name arg:platform_name arg:function_name arg:flat_args arg:captured_inputs arg:stage arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "collide_rect_ratio",
    "source_code": "class collide_rect_ratio:\n\n    def __init__(self, ratio):\n        self.ratio = ratio\n\n    def __repr__(self):\n        return '<{klass} @{id:x} {attrs}>'.format(klass=self.__class__.__name__, id=id(self) & 16777215, attrs=' '.join((f'{k}={v!r}' for k, v in self.__dict__.items())))\n\n    def __call__(self, left, right):\n        ratio = self.ratio\n        leftrect = left.rect\n        width = leftrect.width\n        height = leftrect.height\n        leftrect = leftrect.inflate(width * ratio - width, height * ratio - height)\n        rightrect = right.rect\n        width = rightrect.width\n        height = rightrect.height\n        rightrect = rightrect.inflate(width * ratio - width, height * ratio - height)\n        return leftrect.colliderect(rightrect)",
    "docstring": "A callable class that checks for collisions using scaled rects The class checks for collisions between two sprites using a scaled version of the sprites' rects. Is created with a ratio; the instance is then intended to be passed as a collided callback function to the *collide functions. New in pygame 1.8.1",
    "type": "class",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "ClassDef name:collide_rect_ratio FunctionDef name:__init__ arg:self arg:ratio arguments arg arg Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call Call Call FunctionDef name:__call__ arg:self arg:left arg:right arguments arg arg arg Assign Assign Assign Assign Assign Call Assign Assign Assign Assign Call Return return:yes Call"
  },
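An illustrative sketch of how an instance is passed as the `collided` callback to `pygame.sprite.spritecollide`; the `Ball` sprite is hypothetical.

```python
import pygame

class Ball(pygame.sprite.Sprite):  # hypothetical minimal sprite
    def __init__(self, x, y):
        super().__init__()
        self.rect = pygame.Rect(x, y, 20, 20)

player = Ball(0, 0)
group = pygame.sprite.Group(Ball(5, 5), Ball(100, 100))

# Report a hit only when rects scaled to 50% of their size still overlap.
hits = pygame.sprite.spritecollide(
    player, group, dokill=False,
    collided=pygame.sprite.collide_rect_ratio(0.5),
)
```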
  {
    "library": "django",
    "name": "url_params_from_lookup_dict",
    "source_code": "def url_params_from_lookup_dict(lookups):\n    params = {}\n    if lookups and hasattr(lookups, 'items'):\n        for k, v in lookups.items():\n            if callable(v):\n                v = v()\n            if isinstance(v, (tuple, list)):\n                v = ','.join((str(x) for x in v))\n            elif isinstance(v, bool):\n                v = ('0', '1')[v]\n            else:\n                v = str(v)\n            params[k] = v\n    return params",
    "docstring": "Convert the type of lookups specified in a ForeignKey limit_choices_to attribute to a dictionary of query parameters",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\widgets.py",
    "ast_data": "FunctionDef name:url_params_from_lookup_dict arg:lookups arguments arg Assign If BoolOp Call For Call If Call Assign Call If Call Assign Call Call If Call Assign Assign Call Assign Return return:yes"
  },
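A small illustration of each conversion branch above (callable, sequence, bool, scalar), assuming a configured Django environment so the import succeeds.

```python
from django.contrib.admin.widgets import url_params_from_lookup_dict

params = url_params_from_lookup_dict({
    "status": lambda: "published",  # callables are invoked first
    "id__in": [1, 2, 3],            # sequences become comma-joined strings
    "active": True,                 # booleans map to '0' / '1'
    "rank": 7,                      # everything else is str()-ed
})
# {'status': 'published', 'id__in': '1,2,3', 'active': '1', 'rank': '7'}
```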
  {
    "library": "scipy",
    "name": "_from_tck",
    "source_code": "@classmethod\ndef _from_tck(cls, tck):\n    self = cls.__new__(cls)\n    if len(tck) != 5:\n        raise ValueError('tck should be a 5 element tuple of tx, ty, c, kx, ky')\n    self.tck = tck[:3]\n    self.degrees = tck[3:]\n    return self",
    "docstring": "Construct a spline object from given tck and degree",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py",
    "ast_data": "FunctionDef name:_from_tck arg:cls arg:tck arguments arg arg Assign Call If Compare Call Raise Call Assign Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "as_ternary",
    "source_code": "def as_ternary(cond, expr1, expr2):\n    return Expr(Op.TERNARY, (cond, expr1, expr2))",
    "docstring": "Return object as TERNARY expression (cond?expr1:expr2).",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:as_ternary arg:cond arg:expr1 arg:expr2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_on_leave",
    "source_code": "def _on_leave(self, event):\n    event.Skip()\n    LocationEvent('figure_leave_event', self, *self._mpl_coords(event), modifiers=self._mpl_modifiers(), guiEvent=event)._process()",
    "docstring": "Mouse has left the window.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py",
    "ast_data": "FunctionDef name:_on_leave arg:self arg:event arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "kornia",
    "name": "line_segment_transfer_error_one_way",
    "source_code": "def line_segment_transfer_error_one_way(ls1: Tensor, ls2: Tensor, H: Tensor, squared: bool=False) -> Tensor:\n    KORNIA_CHECK_SHAPE(H, ['B', '3', '3'])\n    KORNIA_CHECK_SHAPE(ls1, ['B', 'N', '2', '2'])\n    KORNIA_CHECK_SHAPE(ls2, ['B', 'N', '2', '2'])\n    B, N = ls1.shape[:2]\n    ps1, pe1 = torch.chunk(ls1, dim=2, chunks=2)\n    ps2, pe2 = torch.chunk(ls2, dim=2, chunks=2)\n    ps2_h = convert_points_to_homogeneous(ps2)\n    pe2_h = convert_points_to_homogeneous(pe2)\n    ln2 = ps2_h.cross(pe2_h, dim=3)\n    ps1_in2 = convert_points_to_homogeneous(transform_points(H, ps1))\n    pe1_in2 = convert_points_to_homogeneous(transform_points(H, pe1))\n    er_st1 = (ln2 @ ps1_in2.transpose(-2, -1)).view(B, N).abs()\n    er_end1 = (ln2 @ pe1_in2.transpose(-2, -1)).view(B, N).abs()\n    error = 0.5 * (er_st1 + er_end1)\n    if squared:\n        error = error ** 2\n    return error",
    "docstring": "Return transfer error in image 2 for line segment correspondences given the homography matrix. Line segment end points are reprojected into image 2, and point-to-line error is calculated w.r.t. line, induced by line segment in image 2. See :cite: for details. Args: ls1: line segment correspondences from the left images with shape (B, N, 2, 2). ls2: line segment correspondences from the right images with shape (B, N, 2, 2). H: Homographies with shape :math:. squared: if True (default is False), the squared distance is returned. Returns: the computed distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\homography.py",
    "ast_data": "FunctionDef name:line_segment_transfer_error_one_way arg:ls1 arg:ls2 arg:H arg:squared arguments arg arg arg arg Call Call Call Assign Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Call Assign Call Call Call Assign If Assign Return return:yes"
  },
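A minimal sketch, assuming the function is importable from `kornia.geometry.homography` as the file path suggests: with an identity homography and identical segments, the transfer error is zero.

```python
import torch
from kornia.geometry.homography import line_segment_transfer_error_one_way

B, N = 1, 4
ls1 = torch.rand(B, N, 2, 2)       # random segments in image 1
ls2 = ls1.clone()                  # the same segments in image 2
H = torch.eye(3).expand(B, 3, 3)   # identity homography

err = line_segment_transfer_error_one_way(ls1, ls2, H)
assert err.shape == (B, N)
assert torch.allclose(err, torch.zeros(B, N), atol=1e-5)
```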
  {
    "library": "scipy",
    "name": "Ratkowsky02",
    "source_code": "class Ratkowsky02(Benchmark):\n\n    def __init__(self, dimensions=3):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([10, 0.5, 0.01], [200, 5.0, 0.5]))\n        self.global_optimum = [[72.462237576, 2.6180768402, 0.067359200066]]\n        self.fglob = 8.0565229338\n        self.a = asarray([8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62, 67.08])\n        self.b = asarray([9.0, 14.0, 21.0, 28.0, 42.0, 57.0, 63.0, 70.0, 79.0])\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        vec = x[0] / (1 + exp(x[1] - x[2] * self.b))\n        return sum((self.a - vec) ** 2)",
    "docstring": "Ratkowsky02 objective function. This class defines the Ratkowsky 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ratkowsky02}}(x) = \\sum_{m=1}^{9}(a_m - x[0] / (1 + exp(x[1] - b_m x[2]))^2 where .. math:: \\begin{cases} a=[8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62, 67.08]\\\\ b=[9., 14., 21., 28., 42., 57., 63., 70., 79.]\\\\ \\end{cases} Here :math:, :math: and :math: *Global optimum*: :math: for :math: .. [1]",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py",
    "ast_data": "ClassDef name:Ratkowsky02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign Call Assign Call FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call"
  },
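A standalone check of the objective at the listed optimum, re-implementing `fun` above with plain NumPy so no `Benchmark` base class is needed.

```python
import numpy as np

a = np.asarray([8.93, 10.8, 18.59, 22.33, 39.35, 56.11, 61.73, 64.62, 67.08])
b = np.asarray([9.0, 14.0, 21.0, 28.0, 42.0, 57.0, 63.0, 70.0, 79.0])

def ratkowsky02(x):
    vec = x[0] / (1 + np.exp(x[1] - x[2] * b))
    return np.sum((a - vec) ** 2)

x_opt = [72.462237576, 2.6180768402, 0.067359200066]
print(ratkowsky02(x_opt))  # ~8.0565229338, the fglob value above
```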
  {
    "library": "cherrypy",
    "name": "on_logout",
    "source_code": "def on_logout(self, username):\n    pass",
    "docstring": "Process a successful logout event. :param username: The logged out user name. :type username: str",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:on_logout arg:self arg:username arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_iter_test_masks",
    "source_code": "def _iter_test_masks(self):\n    for f in self.unique_folds:\n        test_index = np.where(self.test_fold == f)[0]\n        test_mask = np.zeros(len(self.test_fold), dtype=bool)\n        test_mask[test_index] = True\n        yield test_mask",
    "docstring": "Generates boolean masks corresponding to test sets.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:_iter_test_masks arg:self arguments arg For Assign Call Compare Assign Call Call Assign"
  },
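This generator backs `sklearn.model_selection.PredefinedSplit`; a short sketch of the public API it serves (a `test_fold` entry of -1 keeps that sample out of every test set).

```python
from sklearn.model_selection import PredefinedSplit

ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
for train_idx, test_idx in ps.split():
    print(train_idx, test_idx)
# [1 2 3] [0]
# [0 2] [1 3]
```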
  {
    "library": "scrapy",
    "name": "LocalCache",
    "source_code": "class LocalCache(OrderedDict[_KT, _VT]):\n\n    def __init__(self, limit: int | None=None):\n        super().__init__()\n        self.limit: int | None = limit\n\n    def __setitem__(self, key: _KT, value: _VT) -> None:\n        if self.limit:\n            while len(self) >= self.limit:\n                self.popitem(last=False)\n        super().__setitem__(key, value)",
    "docstring": "Dictionary with a finite number of keys. Older items expires first.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\utils\\datatypes.py",
    "ast_data": "ClassDef name:LocalCache FunctionDef name:__init__ arg:self arg:limit arguments arg arg Call Call FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg If While Compare Call Call Call Call"
  },
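A behavior sketch: with `limit=2`, inserting a third key evicts the oldest entry (insertion order, via `popitem(last=False)`).

```python
from scrapy.utils.datatypes import LocalCache

cache = LocalCache(limit=2)
cache["a"] = 1
cache["b"] = 2
cache["c"] = 3                    # evicts "a", the oldest key
assert list(cache) == ["b", "c"]
```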
  {
    "library": "matplotlib",
    "name": "resampled",
    "source_code": "def resampled(self, lutsize):\n    colors = self(np.linspace(0, 1, lutsize))\n    new_cmap = ListedColormap(colors, name=self.name)\n    new_cmap._rgba_over = self._rgba_over\n    new_cmap._rgba_under = self._rgba_under\n    new_cmap._rgba_bad = self._rgba_bad\n    return new_cmap",
    "docstring": "Return a new colormap with *lutsize* entries.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:resampled arg:self arg:lutsize arguments arg arg Assign Call Call Assign Call Assign Assign Assign Return return:yes"
  },
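A quick sketch using the public colormap registry (Matplotlib >= 3.6); `resampled` returns a new colormap with the requested number of lookup-table entries.

```python
import matplotlib as mpl

viridis8 = mpl.colormaps["viridis"].resampled(8)
print(viridis8.N)     # 8 entries in the new lookup table
print(viridis8(0.5))  # RGBA of the middle entry
```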
  {
    "library": "pandas",
    "name": "categorical_column_to_series",
    "source_code": "def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]:\n    categorical = col.describe_categorical\n    if not categorical['is_dictionary']:\n        raise NotImplementedError('Non-dictionary categoricals not supported yet')\n    cat_column = categorical['categories']\n    if hasattr(cat_column, '_col'):\n        categories = np.array(cat_column._col)\n    else:\n        raise NotImplementedError(\"Interchanging categorical columns isn't supported yet, and our fallback of using the `col._col` attribute (a ndarray) failed.\")\n    buffers = col.get_buffers()\n    codes_buff, codes_dtype = buffers['data']\n    codes = buffer_to_ndarray(codes_buff, codes_dtype, offset=col.offset, length=col.size())\n    if len(categories) > 0:\n        values = categories[codes % len(categories)]\n    else:\n        values = codes\n    cat = pd.Categorical(values, categories=categories, ordered=categorical['is_ordered'])\n    data = pd.Series(cat)\n    data = set_nulls(data, col, buffers['validity'])\n    return (data, buffers)",
    "docstring": "Convert a column holding categorical data to a pandas Series. Parameters ---------- col : Column Returns ------- tuple Tuple of pd.Series holding the data and the memory owner object that keeps the memory alive.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py",
    "ast_data": "FunctionDef name:categorical_column_to_series arg:col arguments arg Assign If Raise Call Assign If Call Assign Call Raise Call Assign Call Assign Assign Call Call If Compare Call Assign Call Assign Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_process_tensor_event_in_chunks",
    "source_code": "def _process_tensor_event_in_chunks(self, event, tensor_chunks):\n    value = event.summary.value[0]\n    debugger_plugin_metadata = json.loads(compat.as_text(value.metadata.plugin_data.content))\n    device_name = debugger_plugin_metadata['device']\n    num_chunks = debugger_plugin_metadata['numChunks']\n    chunk_index = debugger_plugin_metadata['chunkIndex']\n    if num_chunks <= 1:\n        return event\n    debug_node_name = value.node_name\n    timestamp = int(event.wall_time)\n    tensor_key = '%s_%s_%d' % (device_name, debug_node_name, timestamp)\n    if tensor_key not in tensor_chunks:\n        tensor_chunks[tensor_key] = [None] * num_chunks\n    chunks = tensor_chunks[tensor_key]\n    if value.tensor.tensor_content:\n        chunks[chunk_index] = value.tensor\n    elif value.tensor.string_val:\n        chunks[chunk_index] = event\n    if None not in chunks:\n        if value.tensor.tensor_content:\n            event.summary.value[0].tensor.tensor_content = b''.join((chunk.tensor_content for chunk in chunks))\n            del tensor_chunks[tensor_key]\n            return event\n        elif value.tensor.string_val:\n            merged_event = chunks[0]\n            for chunk in chunks[1:]:\n                merged_event.summary.value[0].tensor.string_val.extend(list(chunk.summary.value[0].tensor.string_val))\n            return merged_event",
    "docstring": "Possibly reassemble event chunks. Due to gRPC's message size limit, a large tensor can be encapsulated in multiple Event proto chunks to be sent through the debugger stream. This method keeps track of the chunks that have arrived, reassemble all chunks corresponding to a tensor when they have arrived and return the reassembled Event proto. Args: event: The single Event proto that has arrived. tensor_chunks: A dict used to keep track of the Event protos that have arrived but haven't been reassembled. Returns: If all Event protos corresponding to a tensor have arrived, returns the reassembled Event proto. Otherwise, return None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:_process_tensor_event_in_chunks arg:self arg:event arg:tensor_chunks arguments arg arg arg Assign Assign Call Call Assign Assign Assign If Compare Return return:yes Assign Assign Call Assign If Compare Assign Assign If Assign If Assign If Compare If Assign Call Return return:yes If Assign For Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return len(self._cs)",
    "docstring": "Return the number of points in this LineString.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\linestring.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_rename",
    "source_code": "@final\ndef _rename(self, name: Hashable) -> Self:\n    result = self._view()\n    result._name = name\n    return result",
    "docstring": "fastpath for rename if new name is already validated.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_rename arg:self arg:name arguments arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "_check_object_list_is_ordered",
    "source_code": "def _check_object_list_is_ordered(self):\n    ordered = getattr(self.object_list, 'ordered', None)\n    if ordered is not None and (not ordered):\n        obj_list_repr = '{} {}'.format(self.object_list.model, self.object_list.__class__.__name__) if hasattr(self.object_list, 'model') else '{!r}'.format(self.object_list)\n        warnings.warn('Pagination may yield inconsistent results with an unordered object_list: {}.'.format(obj_list_repr), UnorderedObjectListWarning, stacklevel=3)",
    "docstring": "Warn if self.object_list is unordered (typically a QuerySet).",
    "type": "method",
    "file_path": "django\\django\\core\\paginator.py",
    "ast_data": "FunctionDef name:_check_object_list_is_ordered arg:self arguments arg Assign Call If BoolOp Compare Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TextFileStringTableInitializer",
    "source_code": "class TextFileStringTableInitializer(TextFileInitializer):\n\n    def __init__(self, filename, key_column_index=TextFileIndex.LINE_NUMBER, value_column_index=TextFileIndex.WHOLE_LINE, vocab_size=None, delimiter='\\t', name='text_file_string_table_init'):\n        super(TextFileStringTableInitializer, self).__init__(filename, dtypes.int64, key_column_index, dtypes.string, value_column_index, vocab_size=vocab_size, delimiter=delimiter, name=name)",
    "docstring": "Table initializer for IDs to string tables from a text file.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "ClassDef name:TextFileStringTableInitializer FunctionDef name:__init__ arg:self arg:filename arg:key_column_index arg:value_column_index arg:vocab_size arg:delimiter arg:name arguments arg arg arg arg arg arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_gather",
    "source_code": "def _gather(self, per_replica_value, destinations, axis, options=None):\n    if isinstance(per_replica_value, indexed_slices.IndexedSlices):\n        raise NotImplementedError('gather/all_gather does not support IndexedSlices')\n    if options is None:\n        options = collective_util.Options()\n    per_replica_value = _make_tensor_into_per_replica(per_replica_value)\n    validate_destinations(destinations)\n    if self._num_between_graph_workers == 1 and len(per_replica_value.values) == 1 and _devices_match(per_replica_value, destinations, self._canonicalize_devices):\n        with ops.device(per_replica_value.values[0].device):\n            v = array_ops.identity(per_replica_value.values[0])\n        return distribute_utils.regroup((v,), wrap_class=value_lib.Mirrored)\n    return self._gather_implementation(per_replica_value, destinations, axis, options)",
    "docstring": "Gather to . Args: per_replica_value: a , or a like object. destinations: a , a , a alike object, or a device string. It specifies the devices to gather to. To perform an all-gather, pass the same to and . Note that if it's a , the value is gathered to the devices of that variable, and this method doesn't update the variable. axis: specifies the dimension to gather along within each replica's tensor. options: a . See for details. Returns: A or Raises: ValueError: if per_replica_value can't be converted to a or if destinations is not a string, or .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_gather arg:self arg:per_replica_value arg:destinations arg:axis arg:options arguments arg arg arg arg arg If Call Raise Call If Compare Assign Call Assign Call Call If BoolOp Compare Compare Call Call With Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "onnx_symbolic",
    "source_code": "def onnx_symbolic(name: str, opset: Union[OpsetVersion, Sequence[OpsetVersion]], decorate: Optional[Sequence[Callable]]=None, custom: bool=False) -> Callable:\n\n    def wrapper(func: Callable[_P, _R]) -> Callable[_P, _R]:\n        decorated = func\n        if decorate is not None:\n            for decorate_func in decorate:\n                decorated = decorate_func(decorated)\n        global registry\n        nonlocal opset\n        if isinstance(opset, OpsetVersion):\n            opset = (opset,)\n        for opset_version in opset:\n            registry.register(name, opset_version, decorated, custom=custom)\n        return func\n    return wrapper",
    "docstring": "Registers a symbolic function. Usage:: Args: name: The qualified name of the function in the form of 'domain::op'. E.g. 'aten::add'. opset: The opset versions of the function to register at. decorate: A sequence of decorators to apply to the function. custom: Whether the function is a custom symbolic function. Raises: ValueError: If the separator '::' is not in the name.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py",
    "ast_data": "FunctionDef name:onnx_symbolic arg:name arg:opset arg:decorate arg:custom arguments arg arg arg arg FunctionDef name:wrapper arg:func arguments arg Assign If Compare For Assign Call If Call Assign For Call Return return:yes Return return:yes"
  },
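The docstring's usage example was lost in extraction; here is a hedged sketch of the decorator's shape, with a made-up custom op name and a classic `g.op`-style symbolic body (this is an internal PyTorch module, so treat the whole snippet as illustrative).

```python
from torch.onnx._internal.registration import onnx_symbolic

@onnx_symbolic("mylib::relu", opset=9, custom=True)  # hypothetical op name
def mylib_relu(g, self):
    # emit a plain ONNX Relu for the custom op
    return g.op("Relu", self)
```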
  {
    "library": "django",
    "name": "get_placeholder",
    "source_code": "def get_placeholder(self, value, compiler, connection):\n    return connection.ops.get_geom_placeholder(self, value, compiler)",
    "docstring": "Return the placeholder for the spatial column for the given value.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py",
    "ast_data": "FunctionDef name:get_placeholder arg:self arg:value arg:compiler arg:connection arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "check_min_max_valid",
    "source_code": "def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:\n    if min_val.numel() == 0 or max_val.numel() == 0:\n        warnings.warn('must run observer before calling calculate_qparams. ' + 'Returning default values.')\n        return False\n    if min_val.dim() == 0 or max_val.dim() == 0:\n        if min_val == float('inf') and max_val == float('-inf'):\n            warnings.warn('must run observer before calling calculate_qparams. ' + 'Returning default values.')\n            return False\n        assert min_val <= max_val, f'min {min_val} should be less than max {max_val}'\n    else:\n        assert torch.all(min_val <= max_val), f'min {min_val} should be less than max {max_val}'\n    return True",
    "docstring": "Checks if the given minimum and maximum values are valid, meaning that they exist and the min value is less than the max value.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:check_min_max_valid arg:min_val arg:max_val arguments arg arg If BoolOp Compare Call Compare Call Call Return return:yes If BoolOp Compare Call Compare Call If BoolOp Compare Call Compare Call Call Return return:yes Compare Call Compare Return return:yes"
  },
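A quick illustration of both branches, assuming the import path mirrors the file path above.

```python
import torch
from torch.ao.quantization.utils import check_min_max_valid

print(check_min_max_valid(torch.tensor(-1.0), torch.tensor(3.0)))  # True
# Sentinel inf/-inf values mean the observer never ran: warns, returns False.
print(check_min_max_valid(torch.tensor(float("inf")),
                          torch.tensor(float("-inf"))))
```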
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initial_learning_rate, decay_steps, num_periods=0.5, alpha=0.0, beta=0.001, name=None):\n    super(LinearCosineDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.num_periods = num_periods\n    self.alpha = alpha\n    self.beta = beta\n    self.name = name",
    "docstring": "Applies linear cosine decay to the learning rate. Args: initial_learning_rate: A scalar or Tensor or a Python number. The initial learning rate. decay_steps: A scalar or or a Python number. Number of steps to decay over. num_periods: Number of periods in the cosine part of the decay. See computation above. alpha: See computation above. beta: See computation above. name: String. Optional name of the operation. Defaults to 'LinearCosineDecay'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:num_periods arg:alpha arg:beta arg:name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "register",
    "source_code": "def register(self, *types, **kwargs):\n\n    def _df(func):\n        self.add(types, func, **kwargs)\n        return func\n    return _df",
    "docstring": "register dispatcher with new implementation >>> # xdoctest: +SKIP >>> f = Dispatcher(\"f\") >>> @f.register(int) ... def inc(x): ... return x + 1 >>> @f.register(float) ... def dec(x): ... return x - 1 >>> @f.register(list) ... @f.register(tuple) ... def reverse(x): ... return x[::-1] >>> f(1) 2 >>> f(1.0) 0.0 >>> f([1, 2, 3]) [3, 2, 1]",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:register arg:self arguments arg arg arg FunctionDef name:_df arg:func arguments arg Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inner_dim_sizes",
    "source_code": "@property\ndef inner_dim_sizes(self):\n    return self._inner_dim_sizes",
    "docstring": "The inner dimension sizes for this shape. Returns: A 1-D integer .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:inner_dim_sizes arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "ToolYScale",
    "source_code": "class ToolYScale(AxisScaleBase):\n    description = 'Toggle scale Y axis'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.yscale'])\n\n    def set_scale(self, ax, scale):\n        ax.set_yscale(scale)",
    "docstring": "Tool to toggle between linear and logarithmic scales on the Y axis.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolYScale Assign Assign Call arguments arg FunctionDef name:set_scale arg:self arg:ax arg:scale arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "get_size",
    "source_code": "def get_size(self):\n    return self.get_shape()[:2]",
    "docstring": "Return the size of the image as tuple (numrows, numcols).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:get_size arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_resources_dir",
    "source_code": "def get_resources_dir(self, can_create=False) -> Optional[str]:\n    if not self.extra_resources_collection:\n        return None\n    if self.resources_dir:\n        return self.resources_dir\n    generated_path = ExecutionTraceObserver.get_resources_dir_for_et_path(self.output_file_path, create_dir=can_create)\n    if not generated_path:\n        return None\n    self.resources_dir = generated_path\n    return self.resources_dir",
    "docstring": "Generates the resources directory for the generated kernels, or index tensor data or any other metadata that is required to complete the Execution Trace content. The directory is created right where the ET file is being output. Only works if the observer has called set_extra_resource_collection(val=True). Returns None if the observer is not configured with extra resource collection.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:get_resources_dir arg:self arg:can_create arguments arg arg If Return return:no If Return return:yes Assign Call If Return return:no Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_FilterDataset",
    "source_code": "class _FilterDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, predicate, use_legacy_function=False, name=None):\n        self._input_dataset = input_dataset\n        wrapped_func = structured_function.StructuredFunctionWrapper(predicate, self._transformation_name(), dataset=input_dataset, use_legacy_function=use_legacy_function)\n        if not wrapped_func.output_structure.is_compatible_with(tensor_spec.TensorSpec([], dtypes.bool)):\n            raise ValueError(f'Invalid `predicate`. `predicate` must return a `tf.bool` scalar tensor, but its return type is {wrapped_func.output_structure}.')\n        self._predicate = wrapped_func\n        self._name = name\n        variant_tensor = gen_dataset_ops.filter_dataset(input_dataset._variant_tensor, other_arguments=self._predicate.function.captured_inputs, predicate=self._predicate.function, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    def _functions(self):\n        return [self._predicate]\n\n    def _transformation_name(self):\n        return 'Dataset.filter()'",
    "docstring": "A that filters its input according to a predicate function.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\filter_op.py",
    "ast_data": "ClassDef name:_FilterDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:predicate arg:use_legacy_function arg:name arguments arg arg arg arg arg Assign Assign Call Call If Call Call Raise Call Assign Assign Assign Call Call Call FunctionDef name:_functions arg:self arguments arg Return return:yes FunctionDef name:_transformation_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, learning_rate, use_locking=False, name='GradientDescent'):\n    super(GradientDescentOptimizer, self).__init__(use_locking, name)\n    self._learning_rate = learning_rate\n    self._learning_rate_tensor = None",
    "docstring": "Construct a new gradient descent optimizer. Args: learning_rate: A Tensor or a floating point value. The learning rate to use. use_locking: If True use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to \"GradientDescent\". @compatibility(eager) When eager execution is enabled, can be a callable that takes no arguments and returns the actual value to use. This can be useful for changing these values across different invocations of optimizer functions. @end_compatibility",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\gradient_descent.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:learning_rate arg:use_locking arg:name arguments arg arg arg arg Call Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "trimboth",
    "source_code": "def trimboth(data, proportiontocut=0.2, inclusive=(True, True), axis=None):\n    return trimr(data, limits=(proportiontocut, proportiontocut), inclusive=inclusive, axis=axis)",
    "docstring": "Trims the smallest and largest data values. Trims the by masking the ``. Default is 0.2. inclusive : {(bool, bool) tuple}, optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). axis : int, optional Axis along which to perform the trimming. If None, the input array is first flattened.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:trimboth arg:data arg:proportiontocut arg:inclusive arg:axis arguments arg arg arg arg Return return:yes Call"
  },
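A short sketch via the public `scipy.stats.mstats` namespace: with `proportiontocut=0.2` on ten values, two are masked on each side.

```python
import numpy as np
from scipy.stats import mstats

trimmed = mstats.trimboth(np.arange(10), proportiontocut=0.2)
print(trimmed)  # masked array: 0, 1, 8, 9 are masked
```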
  {
    "library": "pytorch",
    "name": "size",
    "source_code": "def size(self, name: str, index: int):\n    assert isinstance(index, int)\n    if name is None:\n        val = self.output_node.get_size()[index]\n    else:\n        assert isinstance(name, str)\n        val = self.named_input_nodes[name].get_size()[index]\n    return texpr(self.rename_indexing(val))",
    "docstring": "Hook called from template code to get the size of an arg. Will add needed args to pass it in if it is dynamic.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arg:index arguments arg arg arg Call If Compare Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "load_from_library",
    "source_code": "def load_from_library(library, label, names):\n    subset = Library()\n    for name in names:\n        found = False\n        if name in library.tags:\n            found = True\n            subset.tags[name] = library.tags[name]\n        if name in library.filters:\n            found = True\n            subset.filters[name] = library.filters[name]\n        if found is False:\n            raise TemplateSyntaxError(\"'%s' is not a valid tag or filter in tag library '%s'\" % (name, label))\n    return subset",
    "docstring": "Return a subset of tags and filters from a library.",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:load_from_library arg:library arg:label arg:names arguments arg arg arg Assign Call For Assign If Compare Assign Assign If Compare Assign Assign If Compare Raise Call Return return:yes"
  },
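An illustrative sketch of carving a subset out of a template `Library`; the `shout` filter is made up.

```python
from django.template.defaulttags import load_from_library
from django.template.library import Library

register = Library()

@register.filter
def shout(value):  # hypothetical filter
    return str(value).upper()

subset = load_from_library(register, "demo", ["shout"])
assert "shout" in subset.filters
# Unknown names raise TemplateSyntaxError, per the loop above.
```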
  {
    "library": "pytorch",
    "name": "_remove_extraneous_pytrees",
    "source_code": "def _remove_extraneous_pytrees(gm: torch.fx.GraphModule) -> None:\n    for node in gm.graph.nodes:\n        if node.op == 'call_module':\n            _try_remove_connecting_pytrees(node)\n    gm.graph.eliminate_dead_code()",
    "docstring": "Remove extraneous pytree flatten/unflatten calls. We try a couple of optimizations here: 1. Remove pytree flatten/unflatten calls between modules 2. TODO: Remove module's in_spec + initial unflatten call 3. TODO: Remove module's out_spec + final flatten call",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_swap.py",
    "ast_data": "FunctionDef name:_remove_extraneous_pytrees arg:gm arguments arg For If Compare Call Call"
  },
  {
    "library": "numpy",
    "name": "tensorsolve",
    "source_code": "@array_function_dispatch(_tensorsolve_dispatcher)\ndef tensorsolve(a, b, axes=None):\n    a, wrap = _makearray(a)\n    b = asarray(b)\n    an = a.ndim\n    if axes is not None:\n        allaxes = list(range(an))\n        for k in axes:\n            allaxes.remove(k)\n            allaxes.insert(an, k)\n        a = a.transpose(allaxes)\n    oldshape = a.shape[-(an - b.ndim):]\n    prod = 1\n    for k in oldshape:\n        prod *= k\n    if a.size != prod ** 2:\n        raise LinAlgError('Input arrays must satisfy the requirement             prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])')\n    a = a.reshape(prod, prod)\n    b = b.ravel()\n    res = wrap(solve(a, b))\n    res.shape = oldshape\n    return res",
    "docstring": "Solve the tensor equation `xaQaaaa` is singular or not 'square' (in the above sense). See Also -------- numpy.tensordot, tensorinv, numpy.einsum Examples -------- >>> import numpy as np >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> rng = np.random.default_rng() >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) >>> np.allclose(np.tensordot(a, x, axes=3), b) True",
    "type": "function",
    "file_path": "numpy\\numpy\\linalg\\_linalg.py",
    "ast_data": "FunctionDef name:tensorsolve arg:a arg:b arg:axes arguments arg arg arg Assign Call Assign Call Assign If Compare Assign Call Call For Call Call Assign Call Assign Assign For If Compare Raise Call Assign Call Assign Call Assign Call Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_find_closest_point_on_path",
    "source_code": "def _find_closest_point_on_path(xys, p):\n    if len(xys) == 1:\n        return (((p - xys[0]) ** 2).sum(), xys[0], (0, 0))\n    dxys = xys[1:] - xys[:-1]\n    norms = (dxys ** 2).sum(axis=1)\n    norms[norms == 0] = 1\n    rel_projs = np.clip(((p - xys[:-1]) * dxys).sum(axis=1) / norms, 0, 1)[:, None]\n    projs = xys[:-1] + rel_projs * dxys\n    d2s = ((projs - p) ** 2).sum(axis=1)\n    imin = np.argmin(d2s)\n    return (d2s[imin], projs[imin], (imin, imin + 1))",
    "docstring": "Parameters ---------- xys : (N, 2) array-like Coordinates of vertices. p : (float, float) Coordinates of point. Returns ------- d2min : float Minimum square distance of *p* to *xys*. proj : (float, float) Projection of *p* onto *xys*. imin : (int, int) Consecutive indices of vertices of segment in *xys* where *proj* is. Segments are considered as including their end-points; i.e. if the closest point on the path is a node in *xys* with index *i*, this returns ``.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_find_closest_point_on_path arg:xys arg:p arguments arg arg If Compare Call Return return:yes Call Assign Assign Call Assign Compare Assign Call Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "current_accelerator",
    "source_code": "def current_accelerator(check_available: bool=False) -> Optional[torch.device]:\n    if (acc := torch._C._accelerator_getAccelerator()) is not None:\n        if not check_available or (check_available and is_available()):\n            return acc\n    return None",
    "docstring": "Return the device of the accelerator available at compilation time. If no accelerator were available at compilation time, returns None. See :ref: for details. Args: check_available (bool, optional): if True, will also do a runtime check to see if the device :func: on top of the compile-time check. Default: `torch.devicetorch.devicetorch.accelerator.current_device_indexmultiprocessing-poison-fork-note`. Example:: >>> # xdoctest: >>> # If an accelerator is available, sent the model to it >>> model = torch.nn.Linear(2, 2) >>> if (current_device := current_accelerator(check_available=True)) is not None: >>> model.to(current_device)",
    "type": "function",
    "file_path": "pytorch\\torch\\accelerator\\__init__.py",
    "ast_data": "FunctionDef name:current_accelerator arg:check_available arguments arg If Compare Call If BoolOp BoolOp Call Return return:yes Return return:no"
  },
  {
    "library": "scipy",
    "name": "ihfft2",
    "source_code": "@_dispatch\ndef ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *, plan=None):\n    return (Dispatchable(x, np.ndarray),)",
    "docstring": "Compute the 2-D inverse FFT of a real spectrum. Parameters ---------- x : array_like The input array s : sequence of ints, optional Shape of the real input to the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See :func: for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.fftihfftnihfftn`.",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_basic.py",
    "ast_data": "FunctionDef name:ihfft2 arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
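A round-trip sketch: `ihfft2` of a real spectrum yields a Hermitian-symmetric array that `hfft2` maps back, mirroring the 1-D `hfft`/`ihfft` pair.

```python
import numpy as np
from scipy import fft

x = np.random.rand(4, 4)     # a real 2-D spectrum
a = fft.ihfft2(x)            # complex output, shape (4, 3)
assert np.allclose(fft.hfft2(a, s=x.shape), x)
```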
  {
    "library": "authlib",
    "name": "deserialize",
    "source_code": "def deserialize(self, s, key, decode=None):\n    if isinstance(s, dict):\n        return self.deserialize_json(s, key, decode)\n    s = to_bytes(s)\n    if s.startswith(b'{') and s.endswith(b'}'):\n        return self.deserialize_json(s, key, decode)\n    return self.deserialize_compact(s, key, decode)",
    "docstring": "Deserialize JWS Serialization, both compact and JSON format. It will automatically deserialize depending on the given JWS. :param s: text of JWS Compact/JSON Serialization :param key: key used to verify the signature :param decode: a function to decode payload data :return: dict :raise: BadSignatureError If key is not provided, it will still deserialize the serialization without verification.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\jws.py",
    "ast_data": "FunctionDef name:deserialize arg:self arg:s arg:key arg:decode arguments arg arg arg arg If Call Return return:yes Call Assign Call If BoolOp Call Call Return return:yes Call Return return:yes Call"
  },
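A compact round-trip sketch with Authlib's public `JsonWebSignature`; the secret and payload are illustrative.

```python
from authlib.jose import JsonWebSignature

jws = JsonWebSignature()
key = b"secret-key"                                          # illustrative key
token = jws.serialize_compact({"alg": "HS256"}, b"hello", key)
data = jws.deserialize(token, key)  # compact input -> deserialize_compact
assert data["payload"] == b"hello"
```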
  {
    "library": "tensorflow",
    "name": "list_logical_devices",
    "source_code": "@tf_export('config.list_logical_devices', 'config.experimental.list_logical_devices')\n@deprecation.deprecated_endpoints('config.experimental.list_logical_devices')\ndef list_logical_devices(device_type=None):\n    return context.context().list_logical_devices(device_type=device_type)",
    "docstring": "Return a list of logical devices created by runtime. Logical devices may correspond to physical devices or remote devices in the cluster. Operations and tensors may be placed on these devices by using the of the . Calling triggers the runtime to configure any visible to the runtime, thereby preventing further configuration. To avoid runtime initialization, call instead. For example: >>> logical_devices = tf.config.list_logical_devices('GPU') >>> if len(logical_devices) > 0: ... # Allocate on GPU:0 ... with tf.device(logical_devices[0].name): ... one = tf.constant(1) ... # Allocate on GPU:1 ... with tf.device(logical_devices[1].name): ... two = tf.constant(2) Args: device_type: (optional string) Only include devices matching this device type. For example \"CPU\" or \"GPU\". Notes: 1. If provided with any numerical values or any string other than supported device type such as 'CPU' it returns an empty list instead of raising error. 2. For default value it returns all logical devices Returns: List of initialized s",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py",
    "ast_data": "FunctionDef name:list_logical_devices arg:device_type arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "__get_tensor_shard__",
    "source_code": "def __get_tensor_shard__(self, index: int) -> torch.Tensor:\n    raise NotImplementedError('_Checkpointable._get_tensor_shard is not implemented')",
    "docstring": "Return a 'torch.Tensor' shard based on 'MetadataIndex'.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_checkpointable.py",
    "ast_data": "FunctionDef name:__get_tensor_shard__ arg:self arg:index arguments arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "RidgeBenchmark",
    "source_code": "class RidgeBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation', 'solver']\n    params = (['dense', 'sparse'], ['auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'])\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, solver = params\n        if representation == 'dense':\n            data = _synth_regression_dataset(n_samples=500000, n_features=100)\n        else:\n            data = _synth_regression_sparse_dataset(n_samples=100000, n_features=10000, density=0.005)\n        return data\n\n    def make_estimator(self, params):\n        representation, solver = params\n        estimator = Ridge(solver=solver, fit_intercept=False, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_reg_scorers(self)\n\n    def skip(self, params):\n        representation, solver = params\n        if representation == 'sparse' and solver == 'svd':\n            return True\n        return False",
    "docstring": "Benchmarks for Ridge.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py",
    "ast_data": "ClassDef name:RidgeBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call FunctionDef name:skip arg:self arg:params arguments arg arg Assign If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "media",
    "source_code": "@property\ndef media(self):\n    media = Media()\n    for field in self.fields.values():\n        media += field.widget.media\n    return media",
    "docstring": "Return all media required to render the widgets on this form.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:media arg:self arguments arg Assign Call For Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "@available_if(_can_inverse_transform)\ndef inverse_transform(self, X, **params):\n    with _raise_or_warn_if_not_fitted(self):\n        _raise_for_params(params, self, 'inverse_transform')\n        routed_params = process_routing(self, 'inverse_transform', **params)\n        reverse_iter = reversed(list(self._iter()))\n        for _, name, transform in reverse_iter:\n            X = transform.inverse_transform(X, **routed_params[name].inverse_transform)\n        return X",
    "docstring": "Apply for each step in a reverse order. All estimators in the pipeline must support . Parameters ---------- X : array-like of shape (n_samples, n_transformed_features) Data samples, where `enable_metadata_routing=TrueMetadata Routing User Guide ` for more details. Returns ------- X_original : ndarray of shape (n_samples, n_features) Inverse transformed data, that is, data in the original feature space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg arg With Call Call Assign Call Assign Call Call Call For Assign Call Return return:yes Call"
  },
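A sketch of the reverse walk: `inverse_transform` undoes PCA first, then the scaler.

```python
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

X = np.random.rand(20, 5)
pipe = Pipeline([("scale", StandardScaler()), ("pca", PCA(n_components=3))])
Xt = pipe.fit_transform(X)
X_back = pipe.inverse_transform(Xt)  # PCA inverse, then scaler inverse
print(X_back.shape)                  # (20, 5)
```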
  {
    "library": "pytorch",
    "name": "decompose_triton_kernel_wrapper_functional",
    "source_code": "def decompose_triton_kernel_wrapper_functional(graph):\n    graph_pass = PatternMatcherPass()\n\n    @register_graph_pattern(CallFunctionVarArgs(torch.ops.higher_order.triton_kernel_wrapper_functional), pass_dict=graph_pass)\n    def _(match: Match, *args, **kwargs):\n        from torch._higher_order_ops.triton_kernel_wrap import triton_kernel_wrapper_functional_dense\n        flat_args, spec = pytree.tree_flatten((args, kwargs))\n\n        def decomp(*flat_args):\n            args, kwargs = pytree.tree_unflatten(flat_args, spec)\n            return (triton_kernel_wrapper_functional_dense(*args, **kwargs),)\n        match.replace_by_example(decomp, flat_args, run_functional_passes=False)\n    graph_pass.apply(graph)\n    for node in graph.find_nodes(op='call_function', target=torch.ops.higher_order.triton_kernel_wrapper_functional):\n        raise AssertionError('triton_kernel_wrapper_functional was not removed')",
    "docstring": "Decomposes triton_kernel_wrapper_functional nodes into clones and the underlying mutation node. We assume that the reinplacing pass runs before this; the reinplacing pass tells us (via rewriting the arguments or .meta to those nodes) which Tensors we should clone and which Tensors are safe to reinplace.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:decompose_triton_kernel_wrapper_functional arg:graph arguments arg Assign Call FunctionDef name:_ arg:match arguments arg arg arg Assign Call FunctionDef name:decomp arguments arg Assign Call Return return:yes Call Call Call Call Call For Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "update",
    "source_code": "def update(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):\n    if (left if left is not None else self.left) >= (right if right is not None else self.right):\n        raise ValueError('left cannot be >= right')\n    if (bottom if bottom is not None else self.bottom) >= (top if top is not None else self.top):\n        raise ValueError('bottom cannot be >= top')\n    if left is not None:\n        self.left = left\n    if right is not None:\n        self.right = right\n    if bottom is not None:\n        self.bottom = bottom\n    if top is not None:\n        self.top = top\n    if wspace is not None:\n        self.wspace = wspace\n    if hspace is not None:\n        self.hspace = hspace",
    "docstring": "Update the dimensions of the passed parameters. *None* means unchanged.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:update arg:self arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace arguments arg arg arg arg arg arg arg If Compare Compare Compare Raise Call If Compare Compare Compare Raise Call If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign If Compare Assign"
  },
  {
    "library": "scipy",
    "name": "hess",
    "source_code": "@property\ndef hess(self):\n    if self._h is None:\n        self._h = self._hess(self._x)\n    return self._h",
    "docstring": "Value of Hessian of objective function at current iteration.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion.py",
    "ast_data": "FunctionDef name:hess arg:self arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self.values.name",
    "docstring": "The name of this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "enable_control_flow_v2",
    "source_code": "@tf_export(v1=['enable_control_flow_v2'])\ndef enable_control_flow_v2():\n    logging.vlog(1, 'Enabling control flow v2')\n    ops._control_flow_api_gauge.get_cell().set(True)\n    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True",
    "docstring": "Use control flow v2. control flow v2 (cfv2) is an improved version of control flow in TensorFlow with support for higher order derivatives. Enabling cfv2 will change the graph/function representation of control flow, e.g., and will generate functional and ops instead of low-level , etc. ops. Note: Importing and running graphs exported with old control flow will still be supported. Calling tf.enable_control_flow_v2() lets you opt-in to this TensorFlow 2.0 feature. Note: v2 control flow is always enabled inside of tf.function. Calling this function is not required.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_toggles.py",
    "ast_data": "FunctionDef name:enable_control_flow_v2 arguments Call Call Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "SplitResult",
    "source_code": "@compatibility(is_backward_compatible=False)\nclass SplitResult(NamedTuple):\n    split_module: torch.fx.GraphModule\n    submodule_inputs: dict[str, Any]\n    non_acc_submodule_prefix: str",
    "docstring": "Stores the results of the splitter. Attributes: split_module: root module after splitting. submodule_inputs: a dict that maps submodule name to its inputs. non_acc_submodule_prefix: the prefix for non acc submodules. For acc submodule the prefix is alwasy \"_run_on_acc_\".",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py",
    "ast_data": "ClassDef name:SplitResult Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_binary",
    "source_code": "def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter):\n    coef, intercept, n_iter_ = fit_binary(self, 1, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[1], self._expanded_class_weight[0], sample_weight, random_state=self.random_state)\n    self.t_ += n_iter_ * X.shape[0]\n    self.n_iter_ = n_iter_\n    if self.average > 0:\n        if self.average <= self.t_ - 1:\n            self.coef_ = self._average_coef.reshape(1, -1)\n            self.intercept_ = self._average_intercept\n        else:\n            self.coef_ = self._standard_coef.reshape(1, -1)\n            self._standard_intercept = np.atleast_1d(intercept)\n            self.intercept_ = self._standard_intercept\n    else:\n        self.coef_ = coef.reshape(1, -1)\n        self.intercept_ = np.atleast_1d(intercept)",
    "docstring": "Fit a binary classifier on X and y.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:_fit_binary arg:self arg:X arg:y arg:alpha arg:C arg:sample_weight arg:learning_rate arg:max_iter arguments arg arg arg arg arg arg arg arg Assign Call Assign If Compare If Compare Assign Call Assign Assign Call Assign Call Assign Assign Call Assign Call"
  },
  {
    "library": "kornia",
    "name": "channels",
    "source_code": "@property\ndef channels(self) -> int:\n    return self.layout.channels",
    "docstring": "Return the number channels of the image.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:channels arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_healthcheck_server",
    "source_code": "def create_healthcheck_server(alive_callback: Callable[[], int], port: int, timeout: int) -> HealthCheckServer:\n    return HealthCheckServer(alive_callback, port, timeout)",
    "docstring": "creates health check server object",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\health_check_server.py",
    "ast_data": "FunctionDef name:create_healthcheck_server arg:alive_callback arg:port arg:timeout arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "conv_input_length",
    "source_code": "def conv_input_length(output_length, filter_size, padding, stride):\n    if output_length is None:\n        return None\n    assert padding in {'same', 'valid', 'full'}\n    if padding == 'same':\n        pad = filter_size // 2\n    elif padding == 'valid':\n        pad = 0\n    elif padding == 'full':\n        pad = filter_size - 1\n    return (output_length - 1) * stride - 2 * pad + filter_size",
    "docstring": "Determines input length of a convolution given output length. Args: output_length: integer. filter_size: integer. padding: one of \"same\", \"valid\", \"full\". stride: integer. Returns: The input length (integer).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py",
    "ast_data": "FunctionDef name:conv_input_length arg:output_length arg:filter_size arg:padding arg:stride arguments arg arg arg arg If Compare Return return:no Compare If Compare Assign If Compare Assign If Compare Assign Return return:yes"
  },
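A worked example of the formula above, as a small self-contained sketch (a pure-Python re-statement of `conv_input_length`, not the TensorFlow module itself):

```python
# Mirror of conv_input_length: invert the output-length formula of a conv.
def conv_input_length(output_length, filter_size, padding, stride):
    if output_length is None:
        return None
    pad = {"same": filter_size // 2, "valid": 0, "full": filter_size - 1}[padding]
    return (output_length - 1) * stride - 2 * pad + filter_size

# A stride-2 "same" conv producing 5 outputs with a size-3 filter
# must have seen an input of length (5 - 1) * 2 - 2 * 1 + 3 = 9.
assert conv_input_length(5, 3, "same", 2) == 9
```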
  {
    "library": "tensorflow",
    "name": "max_pool",
    "source_code": "@tf_export(v1=['nn.max_pool'])\n@dispatch.add_dispatch_support\ndef max_pool(value, ksize, strides, padding, data_format='NHWC', name=None, input=None):\n    value = deprecation.deprecated_argument_lookup('input', input, 'value', value)\n    with ops.name_scope(name, 'MaxPool', [value]) as name:\n        if data_format is None:\n            data_format = 'NHWC'\n        channel_index = 1 if data_format.startswith('NC') else 3\n        ksize = _get_sequence(ksize, 2, channel_index, 'ksize')\n        strides = _get_sequence(strides, 2, channel_index, 'strides')\n        if isinstance(padding, (list, tuple)) and data_format == 'NCHW_VECT_C':\n            raise ValueError(f\"`data_format='NCHW_VECT_C'` is not supported with explicit padding. Received: padding={padding}\")\n        padding, explicit_paddings = convert_padding(padding)\n        if np.isscalar(ksize) and ksize == 0 or (isinstance(ksize, (list, tuple, np.ndarray)) and any((v == 0 for v in ksize))):\n            raise ValueError(f'`ksize` cannot be zero. Received: ksize={ksize}')\n        return gen_nn_ops.max_pool(value, ksize=ksize, strides=strides, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, name=name)",
    "docstring": "Performs the max pooling on the input. Args: value: A 4-D of the format specified by . ksize: An int or list of that has length , or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length , or . The stride of the sliding window for each dimension of the input tensor. padding: Either the or indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. When explicit padding is used and data_format is , this should be in the form . When explicit padding used and data_format is , this should be in the form . When using explicit padding, the size of the paddings cannot be greater than the sliding window size. data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported. name: Optional name for the operation. input: Alias for value. Returns: A of format specified by . The max pooled output tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:max_pool arg:value arg:ksize arg:strides arg:padding arg:data_format arg:name arg:input arguments arg arg arg arg arg arg arg Assign Call With Call If Compare Assign Assign Call Assign Call Assign Call If BoolOp Call Compare Raise Call Assign Call If BoolOp BoolOp Call Compare BoolOp Call Call Compare Raise Call Return return:yes Call Call"
  },
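A short usage sketch for the v1 wrapper above; note that per the source, integer `ksize`/`strides` are expanded by `_get_sequence`:

```python
# Max-pool a 4x4 single-channel image down to 2x2 (NHWC layout).
import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
y = tf.compat.v1.nn.max_pool(x, ksize=2, strides=2, padding="VALID")
print(y.shape)  # (1, 2, 2, 1)
```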
  {
    "library": "tensorflow",
    "name": "async_noop",
    "source_code": "def async_noop(name=None):\n    with ops.name_scope(name, 'async_noop') as name:\n        cond_init_value = constant_op.constant(False, name='cond_init_value')\n        func_graph_signature = [tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)]\n        cond_graph = func_graph_module.func_graph_from_py_func('cond_graph', lambda x: x, [cond_init_value], {}, signature=func_graph_signature, func_graph=util.WhileCondFuncGraph('cond_graph', collections=ops.get_default_graph()._collections), add_control_dependencies=False)\n        body_graph = func_graph_module.func_graph_from_py_func('body_graph', lambda x: x, [cond_init_value], {}, signature=func_graph_signature, func_graph=util.WhileBodyFuncGraph('body_graph', collections=ops.get_default_graph()._collections), add_control_dependencies=False)\n        while_op, _ = util.get_op_and_outputs(gen_functional_ops._while([cond_init_value], util.create_new_tf_function(cond_graph), util.create_new_tf_function(body_graph), output_shapes=[[]], name=name))\n        util.maybe_set_lowering_attr(while_op, lower_using_switch_merge=False)\n    return while_op",
    "docstring": "Returns a no-op that is implemented as an async kernel. This operation may be useful to implement \"aggressive inter-op parallelism\" because it will cause any immediate downstream operations to be scheduled on different threads. Args: name: The name of the operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:async_noop arg:name arguments arg With Call Assign Call Assign Call Assign Call arguments arg Call Call Assign Call arguments arg Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "smart_cond",
    "source_code": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n    if isinstance(pred, variables.Variable):\n        return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n    return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)",
    "docstring": "Return either if predicate is true else . If is a bool or has a constant value, we return either or , otherwise we use to dynamically route to both. Args: pred: A scalar determining whether to return the result of or . true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using . Returns: Tensors returned by the call to either or . Raises: TypeError: If or is not callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\control_flow_util.py",
    "ast_data": "FunctionDef name:smart_cond arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "__next__",
    "source_code": "def __next__(self):\n    line = self.readline()\n    if not line:\n        raise StopIteration\n    return line",
    "docstring": "Return the next line of bytes.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpreqbody.py",
    "ast_data": "FunctionDef name:__next__ arg:self arguments arg Assign Call If Raise Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    return CategoricalColumn.IdWeightPair(transformation_cache.get(self, state_manager), None)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_copy_array_if_base_present",
    "source_code": "def _copy_array_if_base_present(a):\n    if a.base is not None:\n        return a.copy()\n    return a",
    "docstring": "Copy the array if its base points to a parent array.",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\distance.py",
    "ast_data": "FunctionDef name:_copy_array_if_base_present arg:a arguments arg If Compare Return return:yes Call Return return:yes"
  },
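A quick illustration of the `base` check this helper relies on: a slice views its parent array (so it would be copied), while a freshly allocated array owns its data and is returned unchanged:

```python
import numpy as np

parent = np.arange(10)
view = parent[2:5]
assert view.base is parent          # a view: would be copied
assert np.arange(3).base is None    # owns its data: returned as-is
```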
  {
    "library": "django",
    "name": "render_value_in_context",
    "source_code": "def render_value_in_context(value, context):\n    value = template_localtime(value, use_tz=context.use_tz)\n    value = localize(value, use_l10n=context.use_l10n)\n    if context.autoescape:\n        if not issubclass(type(value), str):\n            value = str(value)\n        return conditional_escape(value)\n    else:\n        return str(value)",
    "docstring": "Convert any value to a string to become part of a rendered template. This means escaping, if required, and conversion to a string. If value is a string, it's expected to already be translated.",
    "type": "function",
    "file_path": "django\\django\\template\\base.py",
    "ast_data": "FunctionDef name:render_value_in_context arg:value arg:context arguments arg arg Assign Call Assign Call If If Call Call Assign Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "data_path",
    "source_code": "def data_path(path: str | os.PathLike[str], createdir: bool=False) -> str:\n    path_obj = Path(path)\n    if not path_obj.is_absolute():\n        if inside_project():\n            path_obj = Path(project_data_dir(), path)\n        else:\n            path_obj = Path('.scrapy', path)\n    if createdir and (not path_obj.exists()):\n        path_obj.mkdir(parents=True)\n    return str(path_obj)",
    "docstring": "Return the given path joined with the .scrapy data directory. If given an absolute path, return it unmodified.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\project.py",
    "ast_data": "FunctionDef name:data_path arg:path arg:createdir arguments arg arg Assign Call If Call If Call Assign Call Call Assign Call If BoolOp Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_unpatchify_8x8",
    "source_code": "def _unpatchify_8x8(input: Tensor, H: int, W: int) -> Tensor:\n    B, N = input.shape[:2]\n    output: Tensor = input.view(B, H // 8, W // 8, 8, 8).permute(0, 1, 3, 2, 4).reshape(B, H, W)\n    return output",
    "docstring": "Reverse non-overlapping 8 x 8 patching. Args: input (Tensor): Input image of the shape :math:. H: height of resulting tensor. W: width of resulting tensor. Returns: output (Tensor): Image patchify of the shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_unpatchify_8x8 arg:input arg:H arg:W arguments arg arg arg Assign Call Call Call Return return:yes"
  },
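A round-trip sketch of the view/permute/reshape steps above: patchify an image into 8x8 tiles, then invert it exactly as `_unpatchify_8x8` does (the patchify half is inferred from the inverse, not taken from the library):

```python
import torch

B, H, W = 2, 16, 24
img = torch.randn(B, H, W)
# Patchify: (B, H, W) -> (B, N, 8, 8) with N = (H // 8) * (W // 8).
patches = img.view(B, H // 8, 8, W // 8, 8).permute(0, 1, 3, 2, 4).reshape(B, -1, 8, 8)
# Unpatchify, mirroring the function above.
restored = patches.view(B, H // 8, W // 8, 8, 8).permute(0, 1, 3, 2, 4).reshape(B, H, W)
assert torch.equal(img, restored)
```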
  {
    "library": "scikit-learn",
    "name": "_get_params_html",
    "source_code": "def _get_params_html(self, deep=True):\n    out = self.get_params(deep=deep)\n    init_func = getattr(self.__init__, 'deprecated_original', self.__init__)\n    init_default_params = inspect.signature(init_func).parameters\n    init_default_params = {name: param.default for name, param in init_default_params.items()}\n\n    def is_non_default(param_name, param_value):\n        if param_name not in init_default_params:\n            return True\n        if init_default_params[param_name] == inspect._empty:\n            return True\n        if isinstance(param_value, BaseEstimator) and type(param_value) is not type(init_default_params[param_name]):\n            return True\n        if param_value != init_default_params[param_name] and (not (is_scalar_nan(init_default_params[param_name]) and is_scalar_nan(param_value))):\n            return True\n        return False\n    remaining_params = [name for name in out if name not in init_default_params]\n    ordered_out = {name: out[name] for name in init_default_params if name in out}\n    ordered_out.update({name: out[name] for name in remaining_params})\n    non_default_ls = tuple([name for name, value in ordered_out.items() if is_non_default(name, value)])\n    return ParamsDict(ordered_out, non_default=non_default_ls)",
    "docstring": "Get parameters for this estimator with a specific HTML representation. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : ParamsDict Parameter names mapped to their values. We return a dictionary, which renders a specific HTML representation in table form.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:_get_params_html arg:self arg:deep arguments arg arg Assign Call Assign Call Assign Call Assign Call FunctionDef name:is_non_default arg:param_name arg:param_value arguments arg arg If Compare Return return:yes If Compare Return return:yes If BoolOp Call Compare Call Call Return return:yes If BoolOp Compare BoolOp Call Call Return return:yes Return return:yes Assign Compare Assign Compare Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "inputs",
    "source_code": "@property\ndef inputs(self):\n    return [x.op.inputs[0] for x in self._enters + self._direct_enters]",
    "docstring": "Input to all the Enter nodes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:inputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_store_sparse_tensors_join",
    "source_code": "def _store_sparse_tensors_join(tensor_list_list, enqueue_many, keep_input):\n    s0, sparse_info_list = _store_sparse_tensors(tensor_list_list[0], enqueue_many, keep_input)\n    stored_list_list = [s0]\n    for tensor_list in tensor_list_list[1:]:\n        s, sparse_info_candidate = _store_sparse_tensors(tensor_list, enqueue_many, keep_input, [st.map_op for st in sparse_info_list])\n        if sparse_info_list != sparse_info_candidate:\n            raise ValueError('Inconsistent SparseTensors list: %s vs. %s' % (tensor_list_list[0], tensor_list))\n        sparse_info_list = [info.merge_with(candidate) for info, candidate in zip(sparse_info_list, sparse_info_candidate)]\n        stored_list_list.append(s)\n    return (stored_list_list, sparse_info_list)",
    "docstring": "Store SparseTensors for feeding into batch_join, etc.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:_store_sparse_tensors_join arg:tensor_list_list arg:enqueue_many arg:keep_input arguments arg arg arg Assign Call Assign For Assign Call If Compare Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "full_exception_context",
    "source_code": "def full_exception_context(exception: BaseException, *, message_log: Collection[str]=(), extensions: Collection[Extension]=(), full_traceback: bool=True) -> str:\n    messages = [f'    {strip_escape_sequences(msg)}'.rstrip() for msg in message_log]\n    while messages and (not messages[-1]):\n        messages.pop()\n    last_msgs = '\\n'.join(messages)\n    exts_list = '\\n'.join((f'* {ext.name} ({ext.version})' for ext in extensions if ext.version != 'builtin'))\n    exc_format = format_traceback(exception, short_traceback=not full_traceback)\n    return error_info(last_msgs or 'None.', exts_list or 'None.', exc_format)",
    "docstring": "Return a formatted message containing useful debugging context.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:full_exception_context arg:exception arguments arg arg arg arg Assign Call Call While BoolOp Call Assign Call Assign Call Compare Assign Call Return return:yes Call BoolOp BoolOp"
  },
  {
    "library": "numpy",
    "name": "_cache",
    "source_code": "def _cache(self, path):\n    import shutil\n    from urllib.request import urlopen\n    upath = self.abspath(path)\n    if not os.path.exists(os.path.dirname(upath)):\n        os.makedirs(os.path.dirname(upath))\n    if self._isurl(path):\n        with urlopen(path) as openedurl:\n            with _open(upath, 'wb') as f:\n                shutil.copyfileobj(openedurl, f)\n    else:\n        shutil.copyfile(path, upath)\n    return upath",
    "docstring": "Cache the file specified by path. Creates a copy of the file in the datasource cache.",
    "type": "method",
    "file_path": "numpy\\numpy\\lib\\_datasource.py",
    "ast_data": "FunctionDef name:_cache arg:self arg:path arguments arg arg Assign Call If Call Call Call Call If Call With Call With Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_versions_from_toml",
    "source_code": "def get_versions_from_toml() -> dict[str, str]:\n    install_map = _optional.INSTALL_MAPPING\n    optional_dependencies = {}\n    with open(SETUP_PATH, 'rb') as pyproject_f:\n        pyproject_toml = tomllib.load(pyproject_f)\n        opt_deps = pyproject_toml['project']['optional-dependencies']\n        dependencies = set(opt_deps['all'])\n        pytest_plugins = {dep for dep in opt_deps['test'] if dep.startswith('pytest-')}\n        dependencies = dependencies.difference(pytest_plugins)\n    for dependency in dependencies:\n        package, version = dependency.strip().split('>=')\n        optional_dependencies[install_map.get(package, package).casefold()] = version\n    for item in EXCLUDE_DEPS:\n        optional_dependencies.pop(item, None)\n    return optional_dependencies",
    "docstring": "Min versions in pyproject.toml for pip install pandas[extra].",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_min_versions_in_sync.py",
    "ast_data": "FunctionDef name:get_versions_from_toml arguments Assign Assign With Call Assign Call Assign Assign Call Assign Call Assign Call For Assign Call Call Assign Call Call For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "track_inputs",
    "source_code": "def track_inputs(self, inputs: tuple[Any, ...]) -> None:\n\n    def _track_inputs(t: torch.Tensor) -> None:\n        self._update_and_maybe_create_winfos(t, _FSDPRefType.INP)\n    tree_map_only(torch.Tensor, _track_inputs, inputs)",
    "docstring": "This is used to track the input tensors to the model and annotate them as ``. Args: inputs (Tuple[Any]): A tuple containing the input data. This can include tensors as well as other data types. Only tensors will be tracked.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\fsdp2_mem_tracker.py",
    "ast_data": "FunctionDef name:track_inputs arg:self arg:inputs arguments arg arg FunctionDef name:_track_inputs arg:t arguments arg Call Call"
  },
  {
    "library": "scipy",
    "name": "_munp",
    "source_code": "def _munp(self, n):\n    integrals = (self._hbins[1:] ** (n + 1) - self._hbins[:-1] ** (n + 1)) / (n + 1)\n    return np.sum(self._hpdf[1:-1] * integrals)",
    "docstring": "Compute the n-th non-central moment.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_munp arg:self arg:n arguments arg arg Assign Return return:yes Call"
  },
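A numeric sketch of the bin-wise moment integral used above: for a piecewise-constant histogram density, E[X^n] = sum_k h_k * (b_{k+1}^{n+1} - b_k^{n+1}) / (n + 1). (The library version additionally slices off padding bins via `self._hpdf[1:-1]`; this standalone example uses unpadded edges and heights.)

```python
import numpy as np

bins = np.array([0.0, 1.0, 2.0])      # bin edges
heights = np.array([0.25, 0.75])      # density per bin (integrates to 1)
n = 1
integrals = (bins[1:] ** (n + 1) - bins[:-1] ** (n + 1)) / (n + 1)
mean = np.sum(heights * integrals)    # 0.25 * 0.5 + 0.75 * 1.5 = 1.25
assert np.isclose(mean, 1.25)
```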
  {
    "library": "kornia",
    "name": "compile",
    "source_code": "def compile(self, *, fullgraph: bool=False, dynamic: bool=False, backend: str='inductor', mode: Optional[str]=None, options: Optional[dict[Any, Any]]=None, disable: bool=False) -> None:\n    self.model.image_encoder = torch.compile(self.model.image_encoder, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode, options=options, disable=disable)\n    self.model.mask_decoder = torch.compile(self.model.mask_decoder, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode, options=options, disable=disable)\n    self.model.prompt_encoder = torch.compile(self.model.prompt_encoder, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode, options=options, disable=disable)",
    "docstring": "Apply /dynamo API into the VisualPrompter API. .. note:: For more information about the dynamo API check the official docs Args: fullgraph: Whether it is ok to break model into several subgraphs dynamic: Use dynamic shape tracing backend: backend to be used mode: Can be either “default”, “reduce-overhead” or “max-autotune” options: A dictionary of options to pass to the backend. disable: Turn torch.compile() into a no-op for testing Example: >>> # prompter = VisualPrompter() >>> # prompter.compile() # You should have torch >= 2.0.0 installed >>> # Use the prompter methods ...",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\visual_prompter.py",
    "ast_data": "FunctionDef name:compile arg:self arguments arg arg arg arg arg arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "LossWrapper",
    "source_code": "class LossWrapper(torch.nn.Module):\n\n    def __init__(self, module, loss_fn):\n        super().__init__()\n        self.module = module\n        self.loss_fn = loss_fn\n\n    def forward(self, *args, **kwargs):\n        raise NotImplementedError('This instance of LossWrapper does not have an overriddenforward(). Please implement forward() to specify the arguments, connection between the module and loss, and loss output value.')",
    "docstring": "LossWrapper is a convenient abstract class that allows you to wrap up both your model as well as its loss function and specify the connectivity between the inputs, model, loss function, and output value. Example:: class MyModelWrapper(LossWrapper): def forward(self, x, targets): model_out = self.module(x) loss_value = self.loss_fn(model_out, targets) return loss_value The above example defines a connectivity where we expect the forward/loss/backward training procedure to take two arguments (x and targets), pass x into the module to get the output of the feedforward computation, pass the model output and the targets value into the loss function, and get and return the loss value, which will be backpropagated by PiPPy. The above class would then be instantiated like:: model = ... # instantiate the model loss_fn = torch.nn.MSELoss() # for the sake of demonstration wrapper = MyModelWrapper(model, loss_fn) pipe = Pipe.from_tracing(wrapper, ...)",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "ClassDef name:LossWrapper FunctionDef name:__init__ arg:self arg:module arg:loss_fn arguments arg arg arg Call Call Assign Assign FunctionDef name:forward arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "RandomDissolving",
    "source_code": "class RandomDissolving(IntensityAugmentationBase2D):\n\n    def __init__(self, step_range: Tuple[float, float]=(100, 500), version: str='2.1', p: float=0.5, keepdim: bool=False, **kwargs: Any) -> None:\n        super().__init__(p=p, same_on_batch=True, keepdim=keepdim)\n        self.step_range = step_range\n        self._dslv = StableDiffusionDissolving(version, **kwargs)\n        self._param_generator = rg.PlainUniformGenerator((self.step_range, 'step_range_factor', None, None))\n\n    def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n        return self._dslv(input, params['step_range_factor'][0].long().item())",
    "docstring": "Perform dissolving transformation using StableDiffusion models. Based on :cite:, the dissolving transformation is essentially applying one-step reverse diffusion. Our implementation currently supports HuggingFace implementations of SD 1.4, 1.5 and 2.1. SD 1.X tends to remove more details than SD2.1. .. list-table:: Title :widths: 32 32 32 :header-rows: 1 * - SD 1.4 - SD 1.5 - SD 2.1 * - figure:: - figure:: - figure:: Args: p: probability of applying the transformation. version: the version of the stable diffusion model. step_range: the step range of the diffusion model steps. Higher the step, stronger the dissolving effects. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). **kwargs: additional arguments for for HF StableDiffusionPipeline. Shape: - Input: :math: or :math:. - Output: :math:",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\dissolving.py",
    "ast_data": "ClassDef name:RandomDissolving FunctionDef name:__init__ arg:self arg:step_range arg:version arg:p arg:keepdim arguments arg arg arg arg arg arg Call Call Assign Assign Call Assign Call FunctionDef name:apply_transform arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_if_scalar_type_as",
    "source_code": "def _if_scalar_type_as(self, tensor):\n    if isinstance(self, _C.Value):\n        return self\n    scalar_type = _type_utils.JitScalarType.from_value(tensor, _type_utils.JitScalarType.UNDEFINED)\n    if scalar_type != _type_utils.JitScalarType.UNDEFINED:\n        ty = scalar_type.scalar_name().lower()\n        return getattr(self, ty)()\n    return self",
    "docstring": "Convert self into the same type of tensor, as necessary. We only support implicit casting for scalars, so we never actually need to insert an ONNX cast operator here; just fix up the scalar.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_helper.py",
    "ast_data": "FunctionDef name:_if_scalar_type_as arg:self arg:tensor arguments arg arg If Call Return return:yes Assign Call If Compare Assign Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "through",
    "source_code": "@classmethod\ndef through(cls, p0: Tensor, p1: Tensor) -> 'ParametrizedLine':\n    return ParametrizedLine(p0, normalize(p1 - p0, p=2, dim=-1))",
    "docstring": "Construct a parametrized line going from a point :math: to :math:. Args: p0: tensor with first point :math: where is the point dimension. p1: tensor with second point :math: where is the point dimension. Example: >>> p0 = torch.tensor([0.0, 0.0]) >>> p1 = torch.tensor([1.0, 1.0]) >>> l = ParametrizedLine.through(p0, p1)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:through arg:cls arg:p0 arg:p1 arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_text",
    "source_code": "def get_text(self, lev, fmt):\n    if isinstance(lev, str):\n        return lev\n    elif isinstance(fmt, dict):\n        return fmt.get(lev, '%1.3f')\n    elif callable(getattr(fmt, 'format_ticks', None)):\n        return fmt.format_ticks([*self.labelLevelList, lev])[-1]\n    elif callable(fmt):\n        return fmt(lev)\n    else:\n        return fmt % lev",
    "docstring": "Get the text of the label.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:get_text arg:self arg:lev arg:fmt arguments arg arg arg If Call Return return:yes If Call Return return:yes Call If Call Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_autopacking_helper",
    "source_code": "def _autopacking_helper(list_or_tuple, dtype, name):\n    if context.executing_eagerly():\n        if all((isinstance(elem, core.Tensor) for elem in list_or_tuple)):\n            return gen_array_ops.pack(list_or_tuple, name=name)\n    must_pack = False\n    converted_elems = []\n    with ops.name_scope(name) as scope:\n        for i, elem in enumerate(list_or_tuple):\n            if isinstance(elem, core.Tensor):\n                if dtype is not None and elem.dtype.base_dtype != dtype:\n                    raise TypeError(f'Cannot convert a list containing a tensor of dtype {elem.dtype} to {dtype} (Tensor is: {elem!r})')\n                converted_elems.append(elem)\n                must_pack = True\n            elif isinstance(elem, (list, tuple)):\n                converted_elem = _autopacking_helper(elem, dtype, str(i))\n                if isinstance(converted_elem, core.Tensor):\n                    must_pack = True\n                converted_elems.append(converted_elem)\n            else:\n                converted_elems.append(elem)\n        if must_pack:\n            elems_as_tensors = []\n            for i, elem in enumerate(converted_elems):\n                if isinstance(elem, core.Tensor):\n                    elems_as_tensors.append(elem)\n                else:\n                    elems_as_tensors.append(constant_op.constant(elem, dtype=dtype, name=str(i)))\n            return gen_array_ops.pack(elems_as_tensors, name=scope)\n        else:\n            return converted_elems",
    "docstring": "Converts the given list or tuple to a tensor by packing. Args: list_or_tuple: A (possibly nested) list or tuple containing a tensor. dtype: The element type of the returned tensor. name: A name for the returned tensor. Returns: A with value equivalent to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_autopacking_helper arg:list_or_tuple arg:dtype arg:name arguments arg arg arg If Call If Call Call Return return:yes Call Assign Assign With Call For Call If Call If BoolOp Compare Compare Raise Call Call Assign If Call Assign Call Call If Call Assign Call Call If Assign For Call If Call Call Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CallMethod",
    "source_code": "class CallMethod(_TargetArgsExpr):\n    op = 'call_method'",
    "docstring": "Matches a call_method node in the FX graphs:",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:CallMethod Assign"
  },
  {
    "library": "django",
    "name": "semi_minor",
    "source_code": "@property\ndef semi_minor(self):\n    return capi.semi_minor(self.ptr, byref(c_int()))",
    "docstring": "Return the Semi Minor Axis for this Spatial Reference.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:semi_minor arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "fromkeys",
    "source_code": "def fromkeys(self, keys: Iterable[str], default: Optional[Any]=None) -> ParameterDict:\n    return ParameterDict(((k, default) for k in keys))",
    "docstring": "Return a new ParameterDict with the keys provided. Args: keys (iterable, string): keys to make the new ParameterDict from default (Parameter, optional): value to set for all keys",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:fromkeys arg:self arg:keys arg:default arguments arg arg arg Return return:yes Call"
  },
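A short usage sketch for the method above; note that every key receives the same `default` object (here the `None` placeholder from the signature):

```python
import torch
from torch import nn

d = nn.ParameterDict().fromkeys(["weight", "bias"])
print(list(d.keys()))   # ['weight', 'bias']
print(d["weight"])      # None
```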
  {
    "library": "numpy",
    "name": "time_bounded",
    "source_code": "def time_bounded(self, bitgen, args):\n    dt, max = args\n    if bitgen == 'numpy':\n        self.rg.randint(0, max + 1, nom_size, dtype=dt)\n    else:\n        self.rg.integers(0, max + 1, nom_size, dtype=dt)",
    "docstring": "Timer for 8-bit bounded values. Parameters (packed as args) ---------- dt : {uint8, uint16, uint32, unit64} output dtype max : int Upper bound for range. Lower is always 0. Must be <= 2**bits.",
    "type": "method",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_random.py",
    "ast_data": "FunctionDef name:time_bounded arg:self arg:bitgen arg:args arguments arg arg arg Assign If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "config",
    "source_code": "def config(self):\n    raise ValueError('this method should be reimplemented by subclass')",
    "docstring": "returns an array for the current benchmark configs",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\tensorexpr\\benchmark.py",
    "ast_data": "FunctionDef name:config arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "compute_area",
    "source_code": "def compute_area(self) -> torch.Tensor:\n    coords = self._data.view((-1, 4, 2)) if self._data.ndim == 4 else self._data\n    centroid = coords.mean(dim=1, keepdim=True)\n    angles = torch.atan2(coords[..., 1] - centroid[..., 1], coords[..., 0] - centroid[..., 0])\n    _, clockwise_indices = torch.sort(angles, dim=1, descending=True)\n    ordered_corners = torch.gather(coords, 1, clockwise_indices.unsqueeze(-1).expand(-1, -1, 2))\n    x, y = (ordered_corners[..., 0], ordered_corners[..., 1])\n    area = 0.5 * torch.abs(torch.sum(x * torch.roll(y, 1, 1) - y * torch.roll(x, 1, 1), dim=1))\n    return area.view(self._data.shape[:2]) if self._data.ndim == 4 else area",
    "docstring": "Return :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:compute_area arg:self arguments arg Assign Compare Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Assign Call Call Call Call Return return:yes Compare Call"
  },
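A standalone sketch of the shoelace step used above: the area of a polygon with ordered vertices is 0.5 * |sum(x_i * y_{i+1} - x_{i+1} * y_i)|, which `torch.roll` expresses compactly:

```python
import torch

corners = torch.tensor([[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [0.0, 1.0]])  # a 2x1 box
x, y = corners[:, 0], corners[:, 1]
area = 0.5 * torch.abs(torch.sum(x * torch.roll(y, 1) - y * torch.roll(x, 1)))
assert area.item() == 2.0
```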
  {
    "library": "matplotlib",
    "name": "add_tools_to_container",
    "source_code": "def add_tools_to_container(container, tools=default_toolbar_tools):\n    for group, grouptools in tools:\n        for position, tool in enumerate(grouptools):\n            container.add_tool(tool, group, position)",
    "docstring": "Add multiple tools to the container. Parameters ---------- container : Container object that will get the tools added. tools : list, optional List in the form `.backend_bases.ToolContainerBase.add_tool` for details.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:add_tools_to_container arg:container arg:tools arguments arg arg For For Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_submodule",
    "source_code": "def get_submodule(self, target: str) -> 'Module':\n    if target == '':\n        return self\n    atoms: list[str] = target.split('.')\n    mod: torch.nn.Module = self\n    for item in atoms:\n        if not hasattr(mod, item):\n            raise AttributeError(mod._get_name() + ' has no attribute `' + item + '`')\n        mod = getattr(mod, item)\n        if not isinstance(mod, torch.nn.Module):\n            raise AttributeError('`' + item + '` is not an nn.Module')\n    return mod",
    "docstring": "Return the submodule given by ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:get_submodule arg:self arg:target arguments arg arg If Compare Return return:yes Call For If Call Raise Call Call Assign Call If Call Raise Call Return return:yes"
  },
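A usage sketch of the dotted-path lookup above, using an ad hoc nested `Sequential` for illustration:

```python
import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4), nn.Sequential(nn.ReLU(), nn.Linear(4, 2)))
inner = model.get_submodule("1.1")   # the Linear(4, 2)
print(type(inner).__name__)          # Linear
```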
  {
    "library": "pytorch",
    "name": "parse_equation",
    "source_code": "@classmethod\ndef parse_equation(cls, equation: str) -> tuple[list[str], str]:\n    inputs, outputs = equation.split('->')\n    input_dims, output_dims = (inputs.split(','), outputs.split(','))\n    assert len(input_dims) <= 2, 'Only support at most two inputs'\n    assert len(output_dims) == 1, 'Only support single output'\n    output_dim = output_dims[0]\n    return (input_dims, output_dim)",
    "docstring": "Parse the einsum equation str to input dim chars and output dim char",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_einsum_strategy.py",
    "ast_data": "FunctionDef name:parse_equation arg:cls arg:equation arguments arg arg Assign Call Assign Call Call Compare Call Compare Call Assign Return return:yes"
  },
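A simplified pure-Python sketch of the parsing step (it drops the single-output assertion checks of the method above, keeping only the string handling):

```python
def parse_equation(equation: str) -> tuple[list[str], str]:
    # Split "lhs->rhs", then split the lhs into per-operand dim strings.
    inputs, output = equation.split("->")
    return inputs.split(","), output

assert parse_equation("bij,bjk->bik") == (["bij", "bjk"], "bik")
```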
  {
    "library": "tensorflow",
    "name": "extract_valid_libs",
    "source_code": "def extract_valid_libs(filepath):\n\n    def repository_rule(**kwargs):\n        del kwargs\n    with open(filepath, 'r') as f:\n        f_globals = {'repository_rule': repository_rule}\n        f_locals = {}\n        exec(f.read(), f_globals, f_locals)\n    return set(f_locals['VALID_LIBS'])",
    "docstring": "Evaluate syslibs_configure.bzl, return the VALID_LIBS set from that file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\builds\\check_system_libs.py",
    "ast_data": "FunctionDef name:extract_valid_libs arg:filepath arguments arg FunctionDef name:repository_rule arguments arg With Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "tools",
    "source_code": "@property\ndef tools(self):\n    return self._tools",
    "docstring": "A dict mapping tool name -> controlled tool.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py",
    "ast_data": "FunctionDef name:tools arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "parse_settings",
    "source_code": "def parse_settings(rollout_state: str) -> Settings:\n    settings_text, _ = extract_settings_user_opt_in_from_text(rollout_state)\n    return parse_settings_from_text(settings_text)",
    "docstring": "Parse settings, if any, from the rollout state. If the issue body contains \"---\" then the text above that is the settings and the text below is the list of opted in users. If it doesn't contain \"---\" then the settings are empty and the default values are used.",
    "type": "function",
    "file_path": "pytorch\\.github\\scripts\\runner_determinator.py",
    "ast_data": "FunctionDef name:parse_settings arg:rollout_state arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "BNReLU2d",
    "source_code": "class BNReLU2d(_FusedModule):\n\n    def __init__(self, batch_norm, relu):\n        assert type_before_parametrizations(batch_norm) == BatchNorm2d and type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_parametrizations(relu)}'\n        super().__init__(batch_norm, relu)",
    "docstring": "This is a sequential container which calls the BatchNorm 2d and ReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:BNReLU2d FunctionDef name:__init__ arg:self arg:batch_norm arg:relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "conda_package_to_pip",
    "source_code": "def conda_package_to_pip(package: str):\n    package = re.sub('(?<=[^<>~])=', '==', package).strip()\n    for compare in ('<=', '>=', '=='):\n        if compare in package:\n            pkg, version = package.split(compare)\n            if pkg in EXCLUDE:\n                return\n            if pkg in REMAP_VERSION:\n                return ''.join((pkg, compare, REMAP_VERSION[pkg]))\n            if pkg in CONDA_TO_PIP:\n                return ''.join((CONDA_TO_PIP[pkg], compare, version))\n    if package in EXCLUDE:\n        return\n    if package in CONDA_TO_PIP:\n        return CONDA_TO_PIP[package]\n    return package",
    "docstring": "Convert a conda package to its pip equivalent. In most cases they are the same, those are the exceptions: - Packages that should be excluded (in ) - Packages that should be renamed (in ) - A package requiring a specific version, in conda is defined with a single equal (e.g. ``)",
    "type": "function",
    "file_path": "pandas\\scripts\\generate_pip_deps_from_conda.py",
    "ast_data": "FunctionDef name:conda_package_to_pip arg:package arguments arg Assign Call Call For If Compare Assign Call If Compare Return return:no If Compare Return return:yes Call If Compare Return return:yes Call If Compare Return return:no If Compare Return return:yes Return return:yes"
  },
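A sketch of the version-pin normalization step from the function above: conda's single `=` becomes pip's `==`, while `>=`/`<=` pins pass through untouched thanks to the lookbehind:

```python
import re

for spec in ("numpy=1.24", "numpy>=1.24", "scipy<=1.10"):
    print(re.sub(r"(?<=[^<>~])=", "==", spec).strip())
# numpy==1.24
# numpy>=1.24
# scipy<=1.10
```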
  {
    "library": "sphinx",
    "name": "docname",
    "source_code": "@property\ndef docname(self) -> str:\n    return self.current_document.docname",
    "docstring": "Returns the docname of the document currently being parsed.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:docname arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_skip_traverse",
    "source_code": "def _skip_traverse(self, all_nodes: NodeList, skip_nodes: list) -> NodeSet:\n    start_idx = 0\n    num_nodes = len(all_nodes)\n    idx = 0\n    culprits = set()\n    while idx < num_nodes:\n        node = all_nodes[idx]\n        if node.name in skip_nodes:\n            if idx > start_idx:\n                culprits = self._skip_traverse_impl(all_nodes, start_idx, idx)\n            start_idx = idx + 1\n        elif idx == num_nodes - 1 and start_idx <= idx:\n            culprits = self._skip_traverse_impl(all_nodes, start_idx, idx + 1)\n        idx += 1\n    return culprits",
    "docstring": "Skip certain nodes in graph based on settings",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:_skip_traverse arg:self arg:all_nodes arg:skip_nodes arguments arg arg arg Assign Assign Call Assign Assign Call While Compare Assign If Compare If Compare Assign Call Assign If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "debug_dump",
    "source_code": "def debug_dump(self, debug_path):\n    return super().debug_dump(debug_path)",
    "docstring": "Arguments: debug_path (required): Path to dump the graph to. Calls a debugging function to dump the graph if the debugging is enabled via CUDAGraph.enable_debug_mode()",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:debug_dump arg:self arg:debug_path arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "node_copy",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument']=lambda x: x) -> Node:\n    args = map_arg(node.args, arg_transform)\n    kwargs = map_arg(node.kwargs, arg_transform)\n    assert isinstance(args, tuple)\n    assert isinstance(kwargs, dict)\n    result_node = self.create_node(node.op, node.target, args, kwargs, node.name, node.type)\n    result_node.meta = copy.copy(node.meta)\n    return result_node",
    "docstring": "Copy a node from one graph into another. `gnew_graph`.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:node_copy arg:self arg:node arg:arg_transform arguments arg arg arg arguments arg Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_step",
    "source_code": "@tf_export('summary.experimental.get_step', v1=[])\ndef get_step():\n    return _summary_state.step",
    "docstring": "Returns the default summary step for the current thread. Returns: The step set by if one has been set, otherwise None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:get_step arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "AxesX",
    "source_code": "class AxesX(_Base):\n\n    def __init__(self, axes, aspect=1.0, ref_ax=None):\n        self._axes = axes\n        self._aspect = aspect\n        if aspect == 'axes' and ref_ax is None:\n            raise ValueError(\"ref_ax must be set when aspect='axes'\")\n        self._ref_ax = ref_ax\n\n    def get_size(self, renderer):\n        l1, l2 = self._axes.get_xlim()\n        if self._aspect == 'axes':\n            ref_aspect = _get_axes_aspect(self._ref_ax)\n            aspect = ref_aspect / _get_axes_aspect(self._axes)\n        else:\n            aspect = self._aspect\n        rel_size = abs(l2 - l1) * aspect\n        abs_size = 0.0\n        return (rel_size, abs_size)",
    "docstring": "Scaled size whose relative part corresponds to the data width of the *axes* multiplied by the *aspect*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:AxesX FunctionDef name:__init__ arg:self arg:axes arg:aspect arg:ref_ax arguments arg arg arg arg Assign Assign If BoolOp Compare Compare Raise Call Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Call If Compare Assign Call Assign Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "_init_cipher",
    "source_code": "def _init_cipher(ciphername: bytes, password: bytes | None, salt: bytes, rounds: int) -> Cipher[modes.CBC | modes.CTR | modes.GCM]:\n    if not password:\n        raise TypeError('Key is password-protected, but password was not provided.')\n    ciph = _SSH_CIPHERS[ciphername]\n    seed = _bcrypt_kdf(password, salt, ciph.key_len + ciph.iv_len, rounds, True)\n    return Cipher(ciph.alg(seed[:ciph.key_len]), ciph.mode(seed[ciph.key_len:]))",
    "docstring": "Generate key + iv and return cipher.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:_init_cipher arg:ciphername arg:password arg:salt arg:rounds arguments arg arg arg arg If Raise Call Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "inputs",
    "source_code": "@property\ndef inputs(self):\n    return self._func_graph.inputs",
    "docstring": "Returns tensors in corresponding to arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:inputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "groupby",
    "source_code": "@final\ndef groupby(self, values) -> PrettyDict[Hashable, Index]:\n    if isinstance(values, ABCMultiIndex):\n        values = values._values\n    values = Categorical(values)\n    result = values._reverse_indexer()\n    result = {k: self.take(v) for k, v in result.items()}\n    return PrettyDict(result)",
    "docstring": "Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels}",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:groupby arg:self arg:values arguments arg arg If Call Assign Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
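A usage sketch inferred from the source above (`Index.groupby` is an internal helper, so treat this as illustrative rather than a documented API):

```python
import numpy as np
import pandas as pd

idx = pd.Index(["a", "b", "c", "d"])
groups = idx.groupby(np.array([1, 2, 1, 2]))
print(groups[1])  # Index(['a', 'c'], dtype='object')
print(groups[2])  # Index(['b', 'd'], dtype='object')
```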
  {
    "library": "pytorch",
    "name": "graph_break",
    "source_code": "@_disallow_in_graph_helper(throw_if_not_allowed=False)\ndef graph_break(msg=''):\n    pass",
    "docstring": "Force a graph break",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:graph_break arg:msg arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_all_unique_feature_names",
    "source_code": "def get_all_unique_feature_names(self, plottable_features_only: bool=True) -> set[str]:\n    unique_feature_names = set()\n    for module_fqn in self.generated_reports:\n        feature_dict: dict[str, Any] = self.generated_reports[module_fqn]\n        for feature_name in feature_dict:\n            if not plottable_features_only or type(feature_dict[feature_name]) == torch.Tensor:\n                unique_feature_names.add(feature_name)\n    return unique_feature_names",
    "docstring": "The purpose of this method is to provide a user the set of all feature names so that if they wish to use the filtering capabilities of the generate_table_view(), or use either of the generate_plot_view() or generate_histogram_view(), they don't need to manually parse the generated_reports dictionary to get this information. Args: plottable_features_only (bool): True if the user is only looking for plottable features, False otherwise plottable features are those that are tensor values Default: True (only return those feature names that are plottable) Returns all the unique module fqns present in the reports the ModelReportVisualizer instance was initialized with.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report_visualizer.py",
    "ast_data": "FunctionDef name:get_all_unique_feature_names arg:self arg:plottable_features_only arguments arg arg Assign Call For For If BoolOp Compare Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_insert_loc",
    "source_code": "def validate_insert_loc(loc: int, length: int) -> int:\n    if not is_integer(loc):\n        raise TypeError(f'loc must be an integer between -{length} and {length}')\n    if loc < 0:\n        loc += length\n    if not 0 <= loc <= length:\n        raise IndexError(f'loc must be an integer between -{length} and {length}')\n    return loc",
    "docstring": "Check that we have an integer between -length and length, inclusive. Standardize negative loc to within [0, length]. The exceptions we raise on failure match np.insert.",
    "type": "function",
    "file_path": "pandas\\pandas\\util\\_validators.py",
    "ast_data": "FunctionDef name:validate_insert_loc arg:loc arg:length arguments arg arg If Call Raise Call If Compare If Compare Raise Call Return return:yes"
  },
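A pure-Python sketch of the normalization rule above: negative locations wrap once, the end position is a legal insert point, and out-of-range values raise (matching np.insert). `normalize_loc` is a hypothetical stand-in name:

```python
def normalize_loc(loc: int, length: int) -> int:
    # Standardize negative loc into [0, length]; reject anything outside.
    if loc < 0:
        loc += length
    if not 0 <= loc <= length:
        raise IndexError(f"loc must be an integer between -{length} and {length}")
    return loc

assert normalize_loc(-1, 4) == 3
assert normalize_loc(4, 4) == 4   # inserting at the end is allowed
```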
  {
    "library": "tensorflow",
    "name": "trackable_children",
    "source_code": "def trackable_children(self, serialization_cache):\n    if not utils.should_save_traces():\n        return {}\n    children = self.objects_to_serialize(serialization_cache)\n    children.update(self.functions_to_serialize(serialization_cache))\n    return children",
    "docstring": "Lists all Trackable children connected to this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\base_serialization.py",
    "ast_data": "FunctionDef name:trackable_children arg:self arg:serialization_cache arguments arg arg If Call Return return:no Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_create",
    "source_code": "@staticmethod\ndef _create(ckdtree_node=None):\n    if ckdtree_node is None:\n        return KDTree.node(ckdtree_node)\n    elif ckdtree_node.split_dim == -1:\n        return KDTree.leafnode(ckdtree_node)\n    else:\n        return KDTree.innernode(ckdtree_node)",
    "docstring": "Create either an inner or leaf node, wrapping a cKDTreeNode instance",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_kdtree.py",
    "ast_data": "FunctionDef name:_create arg:ckdtree_node arguments arg If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Bukin06",
    "source_code": "class Bukin06(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-15.0, -5.0), (-3.0, 3.0)]\n        self.global_optimum = [[-10.0, 1.0]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return 100 * sqrt(abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * abs(x[0] + 10)",
    "docstring": "Bukin06 objective function. The Bukin06 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Bukin06}}(x) = 100 \\sqrt{ \\lvert{x_2 - 0.01 x_1^{2}} \\rvert} + 0.01 \\lvert{x_1 + 10} \\rvert with :math: *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py",
    "ast_data": "ClassDef name:Bukin06 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call"
  },
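A quick numeric check of the objective at its stated global optimum, restated as a standalone helper (`bukin06` is a hypothetical name for this sketch):

```python
import numpy as np

def bukin06(x):
    # Same expression as Bukin06.fun above, minus the evaluation counter.
    return 100 * np.sqrt(abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * abs(x[0] + 10)

assert bukin06(np.array([-10.0, 1.0])) < 1e-12   # f(-10, 1) = 0
```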
  {
    "library": "scipy",
    "name": "_compute_tauk",
    "source_code": "def _compute_tauk(n, k, maxit=5):\n    a = n % 2 - 0.5\n    c = (4.0 * floor(n / 2.0) - 4.0 * k + 3.0) * pi / (4.0 * floor(n / 2.0) + 2.0 * a + 2.0)\n\n    def f(x):\n        return x - sin(x) - c\n\n    def df(x):\n        return 1.0 - cos(x)\n    xi = 0.5 * pi\n    for i in range(maxit):\n        xi = xi - f(xi) / df(xi)\n    return xi",
    "docstring": "Helper function for Tricomi initial guesses For details, see formula 3.1 in lemma 3.1 in the original paper. Parameters ---------- n : int Quadrature order k : ndarray of type int Index of roots :math: to compute maxit : int Number of Newton maxit performed, the default value of 5 is sufficient. Returns ------- tauk : ndarray Roots of equation 3.1 See Also -------- initial_nodes_a roots_hermite_asy",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:_compute_tauk arg:n arg:k arg:maxit arguments arg arg arg Assign Assign Call Call FunctionDef name:f arg:x arguments arg Return return:yes Call FunctionDef name:df arg:x arguments arg Return return:yes Call Assign For Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "shear_y",
    "source_code": "def shear_y(min_mag: float, max_mag: float) -> OperationBase:\n    if min_mag != -max_mag:\n        raise ValueError(f'{ShearY.__name__} is a symmetric operation that `- min_mag == max_mag`. Got [{min_mag}, {max_mag}]')\n    return ShearY(None, 1.0, magnitude_range=(0.0, max_mag), symmetric_megnitude=True)",
    "docstring": "Return ShearY op.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\auto\\rand_augment\\ops.py",
    "ast_data": "FunctionDef name:shear_y arg:min_mag arg:max_mag arguments arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_add_file",
    "source_code": "def _add_file(self, filename: str):\n    *prefix, last = filename.split('/')\n    if len(prefix) > 1 and prefix[0] == '.data':\n        return\n    package = self._get_or_create_package(prefix)\n    if isinstance(package, _ExternNode):\n        raise ImportError(f'inconsistent module structure. package contains a module file {filename} that is a subpackage of a module marked external.')\n    if last == '__init__.py':\n        package.source_file = filename\n    elif last.endswith('.py'):\n        package_name = last[:-len('.py')]\n        package.children[package_name] = _ModuleNode(filename)",
    "docstring": "Assembles a Python module out of the given file. Will ignore files in the .data directory. Args: filename (str): the name of the file inside of the package archive to be added",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\package_importer.py",
    "ast_data": "FunctionDef name:_add_file arg:self arg:filename arguments arg arg Assign Call If BoolOp Compare Call Compare Return return:no Assign Call If Call Raise Call If Compare Assign If Call Assign Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "get_bbox",
    "source_code": "def get_bbox(self, renderer):\n    bbox, offsets = self._get_bbox_and_child_offsets(renderer)\n    return bbox",
    "docstring": "Return the bbox of the offsetbox, ignoring parent offsets.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_bbox arg:self arg:renderer arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ThreadLocalStack",
    "source_code": "class ThreadLocalStack(threading.local):\n\n    def __init__(self):\n        super(ThreadLocalStack, self).__init__()\n        self._stack = []\n\n    def peek(self):\n        return self._stack[-1] if self._stack else None\n\n    def push(self, ctx):\n        return self._stack.append(ctx)\n\n    def pop(self):\n        self._stack.pop()",
    "docstring": "A thread-local stack of objects for providing implicit defaults.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\experimental\\thread_local_stack.py",
    "ast_data": "ClassDef name:ThreadLocalStack FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:peek arg:self arguments arg Return return:yes FunctionDef name:push arg:self arg:ctx arguments arg arg Return return:yes Call FunctionDef name:pop arg:self arguments arg Call"
  },
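  The ThreadLocalStack entry above is self-contained enough to exercise directly. A minimal sketch (standard library only) showing that each thread sees its own independent stack; for a `threading.local` subclass, `__init__` re-runs on first access in each new thread:

```python
import threading

class ThreadLocalStack(threading.local):
    # Same shape as the entry above: __init__ runs per-thread, so every
    # thread starts with its own empty _stack.
    def __init__(self):
        super().__init__()
        self._stack = []

    def peek(self):
        return self._stack[-1] if self._stack else None

    def push(self, ctx):
        self._stack.append(ctx)

    def pop(self):
        self._stack.pop()

stack = ThreadLocalStack()
stack.push("main-ctx")

def worker():
    # A fresh thread gets a fresh stack: peek() is None here.
    print(stack.peek())

t = threading.Thread(target=worker)
t.start()
t.join()
print(stack.peek())  # "main-ctx" -- the main thread's stack is untouched
```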
  {
    "library": "tensorflow",
    "name": "__div__",
    "source_code": "def __div__(self, other):\n    return self // other",
    "docstring": "DEPRECATED: Use via instead. This function exists only for backwards compatibility purposes; new code should use via the syntax . Using communicates clearly that the result rounds down, and is forward compatible to Python 3. Args: other: Another . Returns: A whose value is the integer quotient of and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__div__ arg:self arg:other arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_override_helper",
    "source_code": "def _override_helper(clazz_object, operator, func):\n    if operator not in Tensor.OVERLOADABLE_OPERATORS:\n        raise ValueError(f'Overriding {operator} is disallowed. Allowed operators are {Tensor.OVERLOADABLE_OPERATORS}.')\n    setattr(clazz_object, operator, func)",
    "docstring": "Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. Raises: ValueError: If operator is not allowed to be overwritten.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:_override_helper arg:clazz_object arg:operator arg:func arguments arg arg arg If Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "zero_grad",
    "source_code": "def zero_grad(self, set_to_none: bool=True):\n    self._optim.zero_grad(set_to_none)",
    "docstring": "Resets the gradients of all optimized :class: s. Args: set_to_none (bool): instead of setting to zero, set the grads to None. This will in general have lower memory footprint, and can modestly improve performance. However, it changes certain behaviors. For example: 1. When the user tries to access a gradient and perform manual ops on it, a None attribute or a Tensor full of 0s will behave differently. 2. If the user requests `` optimizers have a different behavior if the gradient is 0 or None (in one case it does the step with a gradient of 0 and in the other it skips the step altogether).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\api.py",
    "ast_data": "FunctionDef name:zero_grad arg:self arg:set_to_none arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "add_operations",
    "source_code": "def add_operations(table, operation, collective_indent, operation_indent):\n    table += f'\\x1b[1;33m{collective_indent}**{operation_name}\\x1b[0m\\n'\n    if len(operation['input_shape']):\n        operation_shape = operation['input_shape']\n        operation_sharding = operation['input_sharding']\n        operation_device_mesh = operation['device_mesh']\n        table += f'\\x1b[1;31m{operation_indent}shape: {operation_shape}\\x1b[0m\\n'\n        table += f'\\x1b[1;31m{operation_indent}sharding: {operation_sharding}\\x1b[0m\\n'\n        table += f'\\x1b[1;31m{operation_indent}device mesh: {operation_device_mesh}\\x1b[0m\\n'\n    return table",
    "docstring": "adds operation information to the table",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:add_operations arg:table arg:operation arg:collective_indent arg:operation_indent arguments arg arg arg arg If Call Assign Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "normalize_homography3d",
    "source_code": "def normalize_homography3d(dst_pix_trans_src_pix: Tensor, dsize_src: tuple[int, int, int], dsize_dst: tuple[int, int, int]) -> Tensor:\n    if not isinstance(dst_pix_trans_src_pix, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(dst_pix_trans_src_pix)}')\n    if not (len(dst_pix_trans_src_pix.shape) == 3 or dst_pix_trans_src_pix.shape[-2:] == (4, 4)):\n        raise ValueError(f'Input dst_pix_trans_src_pix must be a Bx3x3 tensor. Got {dst_pix_trans_src_pix.shape}')\n    src_d, src_h, src_w = dsize_src\n    dst_d, dst_h, dst_w = dsize_dst\n    src_norm_trans_src_pix: Tensor = normal_transform_pixel3d(src_d, src_h, src_w).to(dst_pix_trans_src_pix)\n    src_pix_trans_src_norm = _torch_inverse_cast(src_norm_trans_src_pix)\n    dst_norm_trans_dst_pix: Tensor = normal_transform_pixel3d(dst_d, dst_h, dst_w).to(dst_pix_trans_src_pix)\n    dst_norm_trans_src_norm: Tensor = dst_norm_trans_dst_pix @ (dst_pix_trans_src_pix @ src_pix_trans_src_norm)\n    return dst_norm_trans_src_norm",
    "docstring": "Normalize a given homography in pixels to [-1, 1]. Args: dst_pix_trans_src_pix: homography/ies from source to destination to be normalized. :math: dsize_src: size of the source image (depth, height, width). dsize_dst: size of the destination image (depth, height, width). Returns: the normalized homography. Shape: Output: :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:normalize_homography3d arg:dst_pix_trans_src_pix arg:dsize_src arg:dsize_dst arguments arg arg arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Assign Call Call Assign Call Call Call Return return:yes"
  },
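  A quick sanity check of the normalization above (a sketch, assuming `normalize_homography3d` is importable from `kornia.geometry.conversions` as the file path suggests): with identical source and destination sizes the pixel and normalized transforms cancel, so an identity homography stays the identity.

```python
import torch
from kornia.geometry.conversions import normalize_homography3d

# Identity pixel-space homography for a single batch element, shape (1, 4, 4).
H = torch.eye(4)[None]
# Same (depth, height, width) for source and destination:
# N_dst @ (I @ N_src^-1) = N @ N^-1 = I.
H_norm = normalize_homography3d(H, (8, 16, 32), (8, 16, 32))
print(torch.allclose(H_norm, torch.eye(4)[None]))  # True
```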
  {
    "library": "tensorflow",
    "name": "experimental_functions_run_eagerly",
    "source_code": "@deprecation.deprecated(None, 'Use tf.config.functions_run_eagerly instead of the experimental version.')\n@tf_export('config.experimental_functions_run_eagerly')\ndef experimental_functions_run_eagerly():\n    return functions_run_eagerly()",
    "docstring": "Returns the value of the setting.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\eager_function_run.py",
    "ast_data": "FunctionDef name:experimental_functions_run_eagerly arguments Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "table_name_col",
    "source_code": "@classmethod\ndef table_name_col(cls):\n    return 'table_name'",
    "docstring": "Return the name of the metadata column used to store the feature table name.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\oracle\\models.py",
    "ast_data": "FunctionDef name:table_name_col arg:cls arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "visit_AsyncFunctionDef",
    "source_code": "def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:\n    self.visit_FunctionDef(node)",
    "docstring": "Handles AsyncFunctionDef node and set context.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:visit_AsyncFunctionDef arg:self arg:node arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "validate_auth_time",
    "source_code": "def validate_auth_time(self):\n    auth_time = self.get('auth_time')\n    if self.params.get('max_age') and (not auth_time):\n        raise MissingClaimError('auth_time')\n    if auth_time and (not isinstance(auth_time, (int, float))):\n        raise InvalidClaimError('auth_time')",
    "docstring": "Time when the End-User authentication occurred. Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z as measured in UTC until the date/time. When a max_age request is made or when auth_time is requested as an Essential Claim, then this Claim is REQUIRED; otherwise, its inclusion is OPTIONAL.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\claims.py",
    "ast_data": "FunctionDef name:validate_auth_time arg:self arguments arg Assign Call If BoolOp Call Raise Call If BoolOp Call Raise Call"
  },
  {
    "library": "scrapy",
    "name": "close_spider",
    "source_code": "def close_spider(self, spider: Spider | None=None) -> Deferred[Spider]:\n    if spider is not None:\n        warnings.warn(\"Passing a 'spider' argument to Scraper.close_spider() is deprecated.\", category=ScrapyDeprecationWarning, stacklevel=2)\n    if self.slot is None:\n        raise RuntimeError('Scraper slot not assigned')\n    self.slot.closing = Deferred()\n    self.slot.closing.addCallback(self.itemproc.close_spider)\n    self._check_if_closing()\n    return self.slot.closing",
    "docstring": "Close a spider being scraped and release its resources",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\scraper.py",
    "ast_data": "FunctionDef name:close_spider arg:self arg:spider arguments arg arg If Compare Call If Compare Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_breadth_first_traversal",
    "source_code": "def _breadth_first_traversal(self):\n    trackable_objects, _ = super(_AugmentedGraphView, self)._breadth_first_traversal()\n    asset_paths = object_identity.ObjectIdentityDictionary()\n    constant_captures = object_identity.ObjectIdentityDictionary()\n    for obj in trackable_objects:\n        if isinstance(obj, asset.Asset):\n            asset_paths[obj.asset_path] = obj\n        if isinstance(obj, saved_model_utils.TrackableConstant):\n            constant_captures[obj.capture] = obj\n\n    def _get_merged_trackable(x):\n        if isinstance(x, asset.Asset):\n            return asset_paths[x.asset_path]\n        if isinstance(x, saved_model_utils.TrackableConstant):\n            if x.capture in asset_paths:\n                return asset_paths[x.capture]\n            else:\n                return constant_captures[x.capture]\n        return x\n    for obj in list(self._children_cache.keys()):\n        if _get_merged_trackable(obj) is not obj:\n            del self._children_cache[obj]\n            continue\n        for name, child in self._children_cache[obj].items():\n            self._children_cache[obj][name] = _get_merged_trackable(child)\n    return super(_AugmentedGraphView, self)._breadth_first_traversal()",
    "docstring": "Returns all trackable objects in the SavedObjectGraph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_breadth_first_traversal arg:self arguments arg Assign Call Call Assign Call Assign Call For If Call Assign If Call Assign FunctionDef name:_get_merged_trackable arg:x arguments arg If Call Return return:yes If Call If Compare Return return:yes Return return:yes Return return:yes For Call Call If Compare Call For Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "_update_legend_data",
    "source_code": "def _update_legend_data(self, ax):\n    data = {}\n    if ax.legend_ is not None and self._extract_legend_handles:\n        handles = get_legend_handles(ax.legend_)\n        labels = [t.get_text() for t in ax.legend_.texts]\n        data.update({label: handle for handle, label in zip(handles, labels)})\n    handles, labels = ax.get_legend_handles_labels()\n    data.update({label: handle for handle, label in zip(handles, labels)})\n    self._legend_data.update(data)\n    ax.legend_ = None",
    "docstring": "Extract the legend data from an axes object and save it.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:_update_legend_data arg:self arg:ax arguments arg arg Assign If BoolOp Compare Assign Call Assign Call Call Call Assign Call Call Call Call Assign"
  },
  {
    "library": "numpy",
    "name": "_parse_env_order",
    "source_code": "def _parse_env_order(base_order, env):\n    order_str = os.environ.get(env, None)\n    base_order = [order.lower() for order in base_order]\n    if order_str is None:\n        return (base_order, [])\n    neg = order_str.startswith(('^', '!'))\n    order_str_l = list(order_str)\n    sum_neg = order_str_l.count('^') + order_str_l.count('!')\n    if neg:\n        if sum_neg > 1:\n            raise ValueError(f\"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}\")\n        order_str = order_str[1:]\n    elif sum_neg > 0:\n        raise ValueError(f\"Environment variable '{env}' may not mix negated an non-negated items: {order_str}\")\n    orders = order_str.lower().split(',')\n    unknown_order = []\n    if neg:\n        allow_order = base_order.copy()\n        for order in orders:\n            if not order:\n                continue\n            if order not in base_order:\n                unknown_order.append(order)\n                continue\n            if order in allow_order:\n                allow_order.remove(order)\n    else:\n        allow_order = []\n        for order in orders:\n            if not order:\n                continue\n            if order not in base_order:\n                unknown_order.append(order)\n                continue\n            if order not in allow_order:\n                allow_order.append(order)\n    return (allow_order, unknown_order)",
    "docstring": "Parse an environment variable by splitting with \",\" and only returning elements from This method will sequence the environment variable and check for their individual elements in . The items in the environment variable may be negated via '^item' or '!itema,itemb'. It must start with ^/! to negate all options. Raises ------ ValueError: for mixed negated and non-negated orders or multiple negated orders Parameters ---------- base_order : list of str the base list of orders env : str the environment variable to be parsed, if none is found, is returned Returns ------- allow_order : list of str allowed orders in lower-case unknown_order : list of str for values not overlapping with",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:_parse_env_order arg:base_order arg:env arguments arg arg Assign Call Assign Call If Compare Return return:yes Assign Call Assign Call Assign Call Call If If Compare Raise Call Assign If Compare Raise Call Assign Call Call Assign If Assign Call For If If Compare Call If Compare Call Assign For If If Compare Call If Compare Call Return return:yes"
  },
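  A small driver illustrating the two parsing modes described above (a sketch: `_parse_env_order` is a private helper of the deprecated `numpy.distutils`, so treat the import as an assumption about your NumPy version):

```python
import os
from numpy.distutils.system_info import _parse_env_order  # private, deprecated module

base = ['mkl', 'openblas', 'blas']

# Plain mode: keep only listed items, report unknown ones separately.
os.environ['NPY_BLAS_ORDER'] = 'openblas,unknown'
print(_parse_env_order(base, 'NPY_BLAS_ORDER'))  # (['openblas'], ['unknown'])

# Negation mode: a single leading '^' or '!' removes items from the base order.
os.environ['NPY_BLAS_ORDER'] = '^mkl'
print(_parse_env_order(base, 'NPY_BLAS_ORDER'))  # (['openblas', 'blas'], [])
```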
  {
    "library": "scikit-learn",
    "name": "_check_precision_parameters",
    "source_code": "def _check_precision_parameters(self, X):\n    _, n_features = X.shape\n    if self.degrees_of_freedom_prior is None:\n        self.degrees_of_freedom_prior_ = n_features\n    elif self.degrees_of_freedom_prior > n_features - 1.0:\n        self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior\n    else:\n        raise ValueError(\"The parameter 'degrees_of_freedom_prior' should be greater than %d, but got %.3f.\" % (n_features - 1, self.degrees_of_freedom_prior))",
    "docstring": "Check the prior parameters of the precision distribution. Parameters ---------- X : array-like of shape (n_samples, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_check_precision_parameters arg:self arg:X arguments arg arg Assign If Compare Assign If Compare Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "example_value_from_input_node",
    "source_code": "def example_value_from_input_node(self, node: torch.fx.Node):\n    if node.op == 'placeholder':\n        return node.meta['grapharg'].example\n    assert node.op == 'get_attr'\n    return self.nn_modules[node.target]",
    "docstring": "Extract the non-fake example tensor",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\output_graph.py",
    "ast_data": "FunctionDef name:example_value_from_input_node arg:self arg:node arguments arg arg If Compare Return return:yes Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "target",
    "source_code": "@property\ndef target(self) -> str:\n    return '{0}://localhost:{1}'.format(self._config.protocol, self._server.bound_port())",
    "docstring": "Returns a target that can be used to connect to the server. >>> dispatcher = tf.data.experimental.service.DispatchServer() >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.apply(tf.data.experimental.service.distribute( ... processing_mode=\"parallel_epochs\", service=dispatcher.target)) The returned string will be in the form protocol://address, e.g. \"grpc://localhost:5050\".",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:target arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "kleene_or",
    "source_code": "def kleene_or(left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None) -> tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_]]:\n    if left_mask is None:\n        return kleene_or(right, left, right_mask, left_mask)\n    if not isinstance(left, np.ndarray):\n        raise TypeError('Either `left` or `right` need to be a np.ndarray.')\n    raise_for_nan(right, method='or')\n    if right is libmissing.NA:\n        result = left.copy()\n    else:\n        result = left | right\n    if right_mask is not None:\n        left_false = ~(left | left_mask)\n        right_false = ~(right | right_mask)\n        mask = left_false & right_mask | right_false & left_mask | left_mask & right_mask\n    elif right is True:\n        mask = np.zeros_like(left_mask)\n    elif right is libmissing.NA:\n        mask = ~left & ~left_mask | left_mask\n    else:\n        mask = left_mask.copy()\n    return (result, mask)",
    "docstring": "Boolean `leftright` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical or, and the new mask.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\mask_ops.py",
    "ast_data": "FunctionDef name:kleene_or arg:left arg:right arg:left_mask arg:right_mask arguments arg arg arg arg If Compare Return return:yes Call If Call Raise Call Call If Compare Assign Call Assign If Compare Assign Assign Assign If Compare Assign Call If Compare Assign Assign Call Return return:yes"
  },
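  `kleene_or` is the kernel behind `|` on pandas' masked boolean arrays; the Kleene rule it encodes (`True | NA` is `True`, `False | NA` is `NA`) is visible from the public API:

```python
import pandas as pd

arr = pd.array([True, False, None], dtype="boolean")
# True | NA == True (the NA cannot change the result), False | NA == NA,
# NA | NA == NA -- exactly the result/mask pair computed by kleene_or.
print(arr | pd.NA)  # [True, <NA>, <NA>]
```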
  {
    "library": "tensorflow",
    "name": "_create_op_from_tf_operation",
    "source_code": "def _create_op_from_tf_operation(self, c_op, compute_device=True) -> 'Operation':\n    self._check_not_finalized()\n    ret = Operation._from_c_op(c_op=c_op, g=self)\n    name_key = ret.name.lower()\n    if name_key not in self._names_in_use:\n        self._names_in_use[name_key] = 1\n    self._create_op_helper(ret, compute_device=compute_device)\n    return ret",
    "docstring": "Creates an in this graph from the supplied TF_Operation. This method is like create_op() except the new Operation is constructed using . The returned Operation will have as its _c_op field. This is used to create Operation objects around TF_Operations created indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile). This function does not call Operation._control_flow_post_processing or Graph._control_dependencies_for_inputs (since the inputs may not be available yet). The caller is responsible for calling these methods. Args: c_op: a wrapped TF_Operation compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Returns: An object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_create_op_from_tf_operation arg:self arg:c_op arg:compute_device arguments arg arg arg Call Assign Call Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "CSVTable",
    "source_code": "class CSVTable(tables.CSVTable):\n\n    def run(self) -> list[Node]:\n        if 'file' in self.options and self.options['file'].startswith((SEP, os.sep)):\n            env = self.state.document.settings.env\n            filename = Path(self.options['file'])\n            if filename.exists():\n                logger.warning(__('\":file:\" option for csv-table directive now recognizes an absolute path as a relative path from source directory. Please update your document.'), location=(env.docname, self.lineno))\n            else:\n                abspath = env.srcdir / self.options['file'][1:]\n                doc_dir = env.doc2path(env.docname).parent\n                self.options['file'] = relpath(abspath, doc_dir)\n        return super().run()",
    "docstring": "The csv-table directive which searches a CSV file from Sphinx project's source directory when an absolute path is given via :file: option.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\patches.py",
    "ast_data": "ClassDef name:CSVTable FunctionDef name:run arg:self arguments arg If BoolOp Compare Call Assign Assign Call If Call Call Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_bound_items",
    "source_code": "def _bound_items(self):\n    for name in self.fields:\n        yield (name, self[name])",
    "docstring": "Yield (name, bf) pairs, where bf is a BoundField object.",
    "type": "method",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "FunctionDef name:_bound_items arg:self arguments arg For"
  },
  {
    "library": "tensorflow",
    "name": "_do_not_descend",
    "source_code": "def _do_not_descend(self, path, name):\n    return path in self._do_not_descend_map and name in self._do_not_descend_map[path]",
    "docstring": "Safely queries if a specific fully qualified name should be excluded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py",
    "ast_data": "FunctionDef name:_do_not_descend arg:self arg:path arg:name arguments arg arg arg Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "pandas",
    "name": "comparison_op",
    "source_code": "def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike:\n    lvalues = ensure_wrapped_if_datetimelike(left)\n    rvalues = ensure_wrapped_if_datetimelike(right)\n    rvalues = lib.item_from_zerodim(rvalues)\n    if isinstance(rvalues, list):\n        rvalues = np.asarray(rvalues)\n    if isinstance(rvalues, (np.ndarray, ABCExtensionArray)):\n        if len(lvalues) != len(rvalues):\n            raise ValueError('Lengths must match to compare', lvalues.shape, rvalues.shape)\n    if should_extension_dispatch(lvalues, rvalues) or ((isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) and lvalues.dtype != object):\n        res_values = op(lvalues, rvalues)\n    elif is_scalar(rvalues) and isna(rvalues):\n        if op is operator.ne:\n            res_values = np.ones(lvalues.shape, dtype=bool)\n        else:\n            res_values = np.zeros(lvalues.shape, dtype=bool)\n    elif is_numeric_v_string_like(lvalues, rvalues):\n        return invalid_comparison(lvalues, rvalues, op)\n    elif lvalues.dtype == object or isinstance(rvalues, str):\n        res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues)\n    else:\n        res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True)\n    return res_values",
    "docstring": "Evaluate a comparison operation , , , , , or . Note: the caller is responsible for ensuring that numpy warnings are suppressed (with np.errstate(all=\"ignore\")) if needed. Parameters ---------- left : np.ndarray or ExtensionArray right : object Cannot be a DataFrame, Series, or Index. op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} Returns ------- ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:comparison_op arg:left arg:right arg:op arguments arg arg arg Assign Call Assign Call Assign Call If Call Assign Call If Call If Compare Call Call Raise Call If BoolOp Call BoolOp BoolOp Call Compare Compare Assign Call If BoolOp Call Call If Compare Assign Call Assign Call If Call Return return:yes Call If BoolOp Compare Call Assign Call Assign Call Return return:yes"
  },
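  The `is_scalar(rvalues) and isna(rvalues)` branch above explains a familiar pandas behavior: comparing against a scalar NaN short-circuits to all-False (or all-True for `!=`) without elementwise evaluation:

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
# Scalar NaN never compares equal; only `!=` matches everywhere.
print((s == np.nan).tolist())  # [False, False, False]
print((s != np.nan).tolist())  # [True, True, True]
```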
  {
    "library": "sphinx",
    "name": "SphinxWarning",
    "source_code": "class SphinxWarning(SphinxError):\n    category = 'Warning, treated as error'",
    "docstring": "Warning, treated as error.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\errors.py",
    "ast_data": "ClassDef name:SphinxWarning Assign"
  },
  {
    "library": "matplotlib",
    "name": "twiny",
    "source_code": "def twiny(self, axes_class=None):\n    ax = self._add_twin_axes(axes_class, sharey=self)\n    self.axis['top'].set_visible(False)\n    ax.axis['top'].set_visible(True)\n    ax.axis['left', 'right', 'bottom'].set_visible(False)\n    return ax",
    "docstring": "Create a twin of Axes with a shared y-axis but independent x-axis. The x-axis of self will have ticks on the bottom and the returned axes will have ticks on the top.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py",
    "ast_data": "FunctionDef name:twiny arg:self arg:axes_class arguments arg arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dynamic_growth_steps",
    "source_code": "@property\ndef dynamic_growth_steps(self):\n    if isinstance(self._loss_scale, _DynamicLossScaleState):\n        return self._loss_scale.growth_steps\n    else:\n        return None",
    "docstring": "The number of steps it takes to increase the loss scale. This is None if is False. Every consecutive steps with finite gradients, the loss scale is increased.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "FunctionDef name:dynamic_growth_steps arg:self arguments arg If Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "new_axes",
    "source_code": "def new_axes(self, ax, *, _props=None, _init=False):\n    reconnect = False\n    if _init or self.canvas is not ax.get_figure(root=True).canvas:\n        if self.canvas is not None:\n            self.disconnect_events()\n        reconnect = True\n    self.ax = ax\n    if reconnect:\n        self.connect_default_events()\n    self._selection_completed = False\n    if self.direction == 'horizontal':\n        trans = ax.get_xaxis_transform()\n        w, h = (0, 1)\n    else:\n        trans = ax.get_yaxis_transform()\n        w, h = (1, 0)\n    rect_artist = Rectangle((0, 0), w, h, transform=trans, visible=False)\n    if _props is not None:\n        rect_artist.update(_props)\n    elif self._selection_artist is not None:\n        rect_artist.update_from(self._selection_artist)\n    self.ax.add_patch(rect_artist)\n    self._selection_artist = rect_artist",
    "docstring": "Set SpanSelector to operate on a new Axes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:new_axes arg:self arg:ax arguments arg arg arg arg Assign If BoolOp Compare Call If Compare Call Assign Assign If Call Assign If Compare Assign Call Assign Assign Call Assign Assign Call If Compare Call If Compare Call Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, bus, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):\n    SimplePlugin.__init__(self, bus)\n    self.stdin = stdin\n    self.stdout = stdout\n    self.stderr = stderr\n    self.finalized = False",
    "docstring": "Initialize the daemonizer plugin.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:bus arg:stdin arg:stdout arg:stderr arguments arg arg arg arg arg Call Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_node_only_used_for_sym_size",
    "source_code": "def _node_only_used_for_sym_size(node: Node, partition_nodes: list[Node]):\n    if _is_sym_size_node(node):\n        return True\n    return all((user not in partition_nodes or _is_sym_size_node(user) for user in node.users))",
    "docstring": "This utility is used to handle cases when dynami_shape=True tracing leads to symint nodes in the pattern of linear module. In those cases, we need to distinguish between the nodes that are in input for just extracting value of some dimentions (and symint nodes) vs. the one that is activation. For example: graph(x, y, weight): size_0 = torch.ops.aten.sym_size([x], [0]) size_1 = torch.ops.aten.sym_size([y], [1]) view_size = size_0 * size_1 size_3 = torch.ops.aten.sym_size([x], [2]) vie_out = torch.ops.aten.view(x, [view_size, size_3]) return mm(view_out, weight) In the example above y node is not actual input. It exist only to extract size_1",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantizer\\utils.py",
    "ast_data": "FunctionDef name:_node_only_used_for_sym_size arg:node arg:partition_nodes arguments arg arg If Call Return return:yes Return return:yes Call BoolOp Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "guess_is_tensorflow_py_library",
    "source_code": "def guess_is_tensorflow_py_library(py_file_path):\n    if not is_extension_uncompiled_python_source(py_file_path) and (not is_extension_compiled_python_source(py_file_path)):\n        return False\n    py_file_path = _norm_abs_path(py_file_path)\n    return (py_file_path.startswith(_TENSORFLOW_BASEDIR) or py_file_path.startswith(_ABSL_BASEDIR)) and (not py_file_path.endswith('_test.py')) and (os.path.normpath('tensorflow/python/debug/examples') not in os.path.normpath(py_file_path))",
    "docstring": "Guess whether a Python source file is a part of the tensorflow library. Special cases: 1) Returns False for unit-test files in the library (*_test.py), 2) Returns False for files under python/debug/examples. Args: py_file_path: full path of the Python source file in question. Returns: () Whether the file is inferred to be a part of the tensorflow library.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\source_utils.py",
    "ast_data": "FunctionDef name:guess_is_tensorflow_py_library arg:py_file_path arguments arg If BoolOp Call Call Return return:yes Assign Call Return return:yes BoolOp BoolOp Call Call Call Compare Call Call"
  },
  {
    "library": "pandas",
    "name": "EmptyDataError",
    "source_code": "class EmptyDataError(ValueError):\n    pass",
    "docstring": "Exception raised in `` when empty data or header is encountered. This error is typically encountered when attempting to read an empty file or an invalid file where no data or headers are present. See Also -------- read_csv : Read a comma-separated values (CSV) file into DataFrame. errors.ParserError : Exception that is raised by an error encountered in parsing file contents. errors.DtypeWarning : Warning raised when reading different dtypes in a column from a file. Examples -------- >>> from io import StringIO >>> empty = StringIO() >>> pd.read_csv(empty) Traceback (most recent call last): EmptyDataError: No columns to parse from file",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:EmptyDataError"
  },
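  The docstring's example in runnable form, catching the error instead of letting it propagate:

```python
from io import StringIO
import pandas as pd

try:
    pd.read_csv(StringIO(""))  # nothing to parse: no header, no data
except pd.errors.EmptyDataError as exc:
    print(exc)  # No columns to parse from file
```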
  {
    "library": "matplotlib",
    "name": "get_ybound",
    "source_code": "def get_ybound(self):\n    bottom, top = self.get_ylim()\n    if bottom < top:\n        return (bottom, top)\n    else:\n        return (top, bottom)",
    "docstring": "Return the lower and upper y-axis bounds, in increasing order. See Also -------- set_ybound get_ylim, set_ylim invert_yaxis, yaxis_inverted",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_ybound arg:self arguments arg Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "clabel",
    "source_code": "def clabel(self, CS, levels=None, **kwargs):\n    return CS.clabel(levels, **kwargs)",
    "docstring": "Label a contour plot. Adds labels to line contours in given . Parameters ---------- CS : instance Line contours to label. levels : array-like, optional A list of level values, that should be labeled. The list must be a subset of `~.ContourLabeler.clabel`.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:clabel arg:self arg:CS arg:levels arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_prefix_output_keys",
    "source_code": "def _prefix_output_keys(self, output_dict, output_name):\n    new_outputs = {}\n    for key, val in output_dict.items():\n        key = self._prefix_key(key, output_name)\n        new_outputs[key] = val\n    return new_outputs",
    "docstring": "Prepend output_name to the output_dict keys if it doesn't exist. This produces predictable prefixes for the pre-determined outputs of SupervisedOutput. Args: output_dict: dict of string to Tensor, assumed valid. output_name: prefix string to prepend to existing keys. Returns: dict with updated keys and existing values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:_prefix_output_keys arg:self arg:output_dict arg:output_name arguments arg arg arg Assign For Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "keys",
    "source_code": "@compatibility(is_backward_compatible=True)\ndef keys(self, obj: 'Proxy') -> Any:\n    return Attribute(obj, 'keys')()",
    "docstring": "Called when a proxy object is has the keys() method called. This is what happens when ** is called on a proxy. This should return an iterator it ** is suppose to work in your custom tracer.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\proxy.py",
    "ast_data": "FunctionDef name:keys arg:self arg:obj arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_CSRSparseMatrixToDenseGrad",
    "source_code": "@ops.RegisterGradient('CSRSparseMatrixToDense')\ndef _CSRSparseMatrixToDenseGrad(op: ops.Operation, grad):\n    coo_sparse_tensor = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(op.inputs[0], type=grad.dtype)\n    return sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(indices=coo_sparse_tensor.indices, values=array_ops.gather_nd(grad, coo_sparse_tensor.indices), dense_shape=grad.shape)",
    "docstring": "Gradient for csr_sparse_matrix_to_dense op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\sparse\\sparse_csr_matrix_grad.py",
    "ast_data": "FunctionDef name:_CSRSparseMatrixToDenseGrad arg:op arg:grad arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, T):\n    return self._transform(T)",
    "docstring": "Transform new data by linear interpolation. Parameters ---------- T : array-like of shape (n_samples,) or (n_samples, 1) Data to transform. .. versionchanged:: 0.24 Also accepts 2d array with 1 feature. Returns ------- y_pred : ndarray of shape (n_samples,) The transformed data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\isotonic.py",
    "ast_data": "FunctionDef name:transform arg:self arg:T arguments arg arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_print_verbose_msg_init_end",
    "source_code": "def _print_verbose_msg_init_end(self, lb, init_has_converged):\n    converged_msg = 'converged' if init_has_converged else 'did not converge'\n    if self.verbose == 1:\n        print(f'Initialization {converged_msg}.')\n    elif self.verbose >= 2:\n        t = time() - self._init_prev_time\n        print(f'Initialization {converged_msg}. time lapse {t:.5f}s\\t lower bound {lb:.5f}.')",
    "docstring": "Print verbose message on the end of iteration.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_print_verbose_msg_init_end arg:self arg:lb arg:init_has_converged arguments arg arg arg Assign If Compare Call If Compare Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_get_densest",
    "source_code": "def _get_densest(A, eligibleRows):\n    rowCounts = _row_count(A)\n    return np.argmax(rowCounts * eligibleRows)",
    "docstring": "Returns the index of the densest row of A. Ignores rows that are not eligible for consideration. Parameters ---------- A : 2-D array An array representing a matrix eligibleRows : 1-D logical array Values indicate whether the corresponding row of A is eligible to be considered Returns ------- i_densest : int Index of the densest row in A eligible for consideration",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_remove_redundancy.py",
    "ast_data": "FunctionDef name:_get_densest arg:A arg:eligibleRows arguments arg arg Assign Call Return return:yes Call"
  },
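  A small worked example of the selection rule (a sketch assuming `_row_count` counts nonzero entries per row, as the name suggests):

```python
import numpy as np

A = np.array([[1, 0, 0],
              [1, 2, 3],
              [0, 4, 0]])
row_counts = (A != 0).sum(axis=1)          # nonzeros per row: [1, 3, 1]
eligible = np.array([True, False, True])   # row 1 is excluded from consideration
# Multiplying by the boolean mask zeroes out ineligible rows before argmax.
print(np.argmax(row_counts * eligible))    # 0 -- densest among eligible rows
```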
  {
    "library": "tensorflow",
    "name": "get_grappler_config",
    "source_code": "def get_grappler_config(optimizers_list):\n    config = _config_pb2.ConfigProto()\n    rewrite_options = config.graph_options.rewrite_options\n    for optimizer in optimizers_list:\n        rewrite_options.optimizers.append(optimizer)\n    return config",
    "docstring": "Creates a tf.compat.v1.ConfigProto for configuring Grappler. Args: optimizers_list: List of strings that represents the list of optimizers. Returns: tf.ConfigProto.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py",
    "ast_data": "FunctionDef name:get_grappler_config arg:optimizers_list arguments arg Assign Call Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TpuContext",
    "source_code": "class TpuContext(threading.local):\n\n    def __init__(self):\n        self._number_of_shards = None\n\n    @property\n    def number_of_shards(self):\n        return self._number_of_shards\n\n    def set_number_of_shards(self, number_of_shards):\n        self._number_of_shards = number_of_shards",
    "docstring": "A context object holding state about the TPU computation being built.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_function.py",
    "ast_data": "ClassDef name:TpuContext FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:number_of_shards arg:self arguments arg Return return:yes FunctionDef name:set_number_of_shards arg:self arg:number_of_shards arguments arg arg Assign"
  },
  {
    "library": "kornia",
    "name": "get_laplacian_kernel2d",
    "source_code": "def get_laplacian_kernel2d(kernel_size: tuple[int, int] | int, *, device: Optional[Device]=None, dtype: Dtype=torch.float32) -> Tensor:\n    ky, kx = _unpack_2d_ks(kernel_size)\n    _check_kernel_size((ky, kx))\n    kernel = torch.ones((ky, kx), device=device, dtype=dtype)\n    mid_x = kx // 2\n    mid_y = ky // 2\n    kernel[mid_y, mid_x] = 1 - kernel.sum()\n    return kernel",
    "docstring": "Return Gaussian filter matrix coefficients. Args: kernel_size: filter size should be odd. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: 2D tensor with laplacian filter matrix coefficients. Shape: - Output: :math: Examples: >>> get_laplacian_kernel2d(3) tensor([[ 1., 1., 1.], [ 1., -8., 1.], [ 1., 1., 1.]]) >>> get_laplacian_kernel2d(5) tensor([[ 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1.], [ 1., 1., -24., 1., 1.], [ 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_laplacian_kernel2d arg:kernel_size arguments arg arg arg Assign Call Call Assign Call Assign Assign Assign Call Return return:yes"
  },
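  The construction is compact enough to replay by hand: the kernel is all ones except the center, which is set so the coefficients sum to zero:

```python
import torch

ky, kx = 3, 3
kernel = torch.ones(ky, kx)
# Center = 1 - sum(ones) = 1 - 9 = -8, making the kernel sum to zero,
# as a discrete Laplacian should.
kernel[ky // 2, kx // 2] = 1 - kernel.sum()
print(kernel)
print(kernel.sum())  # tensor(0.)
```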
  {
    "library": "scipy",
    "name": "is_finite_scalar",
    "source_code": "def is_finite_scalar(x):\n    return np.size(x) == 1 and np.isfinite(x)",
    "docstring": "Test whether is either a finite scalar or a finite array scalar.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:is_finite_scalar arg:x arguments arg Return return:yes BoolOp Compare Call Call"
  },
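  A few cases make the "scalar or array scalar" wording concrete (the one-liner is inlined so the sketch is self-contained):

```python
import numpy as np

def is_finite_scalar(x):
    # Same body as the scipy helper above.
    return np.size(x) == 1 and np.isfinite(x)

print(is_finite_scalar(3.0))               # True
print(is_finite_scalar(np.array(2)))       # True -- 0-d array scalar, size 1
print(is_finite_scalar(np.inf))            # False -- not finite
print(is_finite_scalar(np.array([1, 2])))  # False -- size != 1, short-circuits
```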
  {
    "library": "tensorflow",
    "name": "gather",
    "source_code": "def gather(self, indices, name=None):\n    del name\n    if isinstance(indices, ops.EagerTensor):\n        indices = indices.numpy()\n    return array_ops_stack.stack([self._maybe_zero(i) for i in indices])",
    "docstring": "See TensorArray.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:gather arg:self arg:indices arg:name arguments arg arg arg If Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_count_weights",
    "source_code": "def _count_weights(exported_program: torch.export.ExportedProgram) -> tuple[defaultdict[torch.dtype, int], defaultdict[torch.dtype, int]]:\n    parameter_count: defaultdict[torch.dtype, int] = defaultdict(int)\n    buffer_count: defaultdict[torch.dtype, int] = defaultdict(int)\n    for parameter in exported_program.parameters():\n        dtype = parameter.dtype\n        parameter_count[dtype] += parameter.numel()\n    for buffer in exported_program.buffers():\n        dtype = buffer.dtype\n        buffer_count[dtype] += buffer.numel()\n    return (parameter_count, buffer_count)",
    "docstring": "Count the size of the parameters in the exported program.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py",
    "ast_data": "FunctionDef name:_count_weights arg:exported_program arguments arg Call Call For Call Assign Call For Call Assign Call Return return:yes"
  },
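  The same counting loop applied to a plain `nn.Module` (a sketch; the original operates on a `torch.export.ExportedProgram`, which requires a traced model):

```python
from collections import defaultdict
import torch

model = torch.nn.Linear(4, 2)
param_count: defaultdict[torch.dtype, int] = defaultdict(int)
for p in model.parameters():
    # Accumulate element counts per dtype, as in _count_weights above.
    param_count[p.dtype] += p.numel()
print(dict(param_count))  # {torch.float32: 10} -- 4*2 weights + 2 biases
```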
  {
    "library": "matplotlib",
    "name": "get_angle",
    "source_code": "def get_angle(self):\n    return self.angle",
    "docstring": "Get the rotation angle in degrees.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_angle arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_fontweight",
    "source_code": "def get_fontweight(self):\n    return self._fontproperties.get_weight()",
    "docstring": "Return the font weight as a string or a number. See Also -------- .font_manager.FontProperties.get_weight",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_fontweight arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "peek_objs",
    "source_code": "def peek_objs(self) -> Iterator[T]:\n    return (t_obj.obj for t_obj in reversed(self._stack))",
    "docstring": "Return iterator over stored objects ordered newest to oldest.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:peek_objs arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "hardswish",
    "source_code": "def hardswish(input: Tensor, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(hardswish, (input,), input, inplace=inplace)\n    if inplace:\n        return torch._C._nn.hardswish_(input)\n    return torch._C._nn.hardswish(input)",
    "docstring": "Apply hardswish function, element-wise. Follows implementation as described in the paper: _. .. math:: \\text{Hardswish}(x) = \\begin{cases} 0 & \\text{if~} x \\le -3, \\\\ x & \\text{if~} x \\ge +3, \\\\ x \\cdot (x + 3) /6 & \\text{otherwise} \\end{cases} See :class: for more details. .. _:",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:hardswish arg:input arg:inplace arguments arg arg If Call Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
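  The piecewise definition checks out numerically through the public API:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([-4.0, -1.0, 0.0, 1.0, 4.0])
print(F.hardswish(x))
# tensor([ 0.0000, -0.3333,  0.0000,  0.6667,  4.0000])
# x <= -3 -> 0; x >= 3 -> x; otherwise x*(x+3)/6, e.g. -1*2/6 = -0.3333.
```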
  {
    "library": "scipy",
    "name": "_krandinit",
    "source_code": "def _krandinit(data, k, rng, xp):\n    mu = xp.mean(data, axis=0)\n    k = np.asarray(k)\n    if data.ndim == 1:\n        _cov = xpx.cov(data, xp=xp)\n        x = rng.standard_normal(size=k)\n        x = xp.asarray(x)\n        x *= xp.sqrt(_cov)\n    elif data.shape[1] > data.shape[0]:\n        _, s, vh = xp.linalg.svd(data - mu, full_matrices=False)\n        x = rng.standard_normal(size=(k, xp_size(s)))\n        x = xp.asarray(x)\n        sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.0))\n        x = x @ sVh\n    else:\n        _cov = xpx.atleast_nd(xpx.cov(data.T, xp=xp), ndim=2, xp=xp)\n        x = rng.standard_normal(size=(k, xp_size(mu)))\n        x = xp.asarray(x)\n        x = x @ xp.linalg.cholesky(_cov).T\n    x += mu\n    return x",
    "docstring": "Returns k samples of a random variable whose parameters depend on data. More precisely, it returns k observations sampled from a Gaussian random variable whose mean and covariances are the ones estimated from the data. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. rng : or Random number generator. Returns ------- x : ndarray A 'k' by 'N' containing the initial centroids",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\vq.py",
    "ast_data": "FunctionDef name:_krandinit arg:data arg:k arg:rng arg:xp arguments arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Call If Compare Assign Call Assign Call Call Assign Call Assign Call Call Assign Assign Call Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "execute_on",
    "source_code": "def execute_on(self, worker):\n    replica_args = _select_worker_slice(worker.worker_index, self._args)\n    replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs)\n    e = _get_error_from_remote_values(replica_args) or _get_error_from_remote_values(replica_kwargs)\n    if e:\n        if not isinstance(e, ClosureInputError):\n            e = ClosureInputError(e)\n        raise e\n    with ops.device(worker.device_name):\n        with context.executor_scope(worker.executor):\n            with coordinator_context.with_dispatch_context(worker):\n                with metric_utils.monitored_timer('closure_execution'):\n                    output_values = self._function(*nest.map_structure(coordinator_context.maybe_get_remote_value, replica_args), **nest.map_structure(coordinator_context.maybe_get_remote_value, replica_kwargs))\n    self.maybe_call_with_output_remote_value(lambda r: r._set_values(output_values))",
    "docstring": "Executes the closure on the given worker. Args: worker: a object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:execute_on arg:self arg:worker arguments arg arg Assign Call Assign Call Assign BoolOp Call Call If If Call Assign Call Raise With Call With Call With Call With Call Assign Call Call Call Call arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_elements",
    "source_code": "def _num_elements(grad):\n    if isinstance(grad, tensor_lib.Tensor):\n        shape_tuple = grad._shape_tuple()\n    elif isinstance(grad, indexed_slices.IndexedSlices):\n        shape_tuple = grad.values._shape_tuple()\n    else:\n        raise ValueError('`grad` not a Tensor or IndexedSlices.')\n    if shape_tuple is None or None in shape_tuple:\n        return 0\n    return functools.reduce(operator.mul, shape_tuple, 1)",
    "docstring": "The number of elements in the tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_num_elements arg:grad arguments arg If Call Assign Call If Call Assign Call Raise Call If BoolOp Compare Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "RgbToLinearRgb",
    "source_code": "class RgbToLinearRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgb_to_linear_rgb(image)",
    "docstring": "Convert an image from sRGB to linear RGB. Reverses the gamma correction of sRGB to get linear RGB values for colorspace conversions. The image data is assumed to be in the range of :math: Returns: Linear RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb_lin = RgbToLinearRgb() >>> output = rgb_lin(input) # 2x3x4x5 References: [1] [2] [3]",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:RgbToLinearRgb FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "builtin_template_path",
    "source_code": "def builtin_template_path(name):\n    return Path(__file__).parent / 'templates' / name",
    "docstring": "Return a path to a builtin template. Avoid calling this function at the module level or in a class-definition because __file__ may not exist, e.g. in frozen environments.",
    "type": "function",
    "file_path": "django\\django\\views\\csrf.py",
    "ast_data": "FunctionDef name:builtin_template_path arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "maybe_expression",
    "source_code": "def maybe_expression(s) -> bool:\n    if not isinstance(s, str):\n        return False\n    operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ('=',)\n    return any((op in s for op in operations))",
    "docstring": "loose checking if s is a pytables-acceptable expression",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\pytables.py",
    "ast_data": "FunctionDef name:maybe_expression arg:s arguments arg If Call Return return:yes Assign Return return:yes Call Compare"
  },
  {
    "library": "pytorch",
    "name": "storage",
    "source_code": "def storage(self):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.storage, (self,), self)\n    torch.storage._warn_typed_storage_removal(stacklevel=2)\n    return self._typed_storage()",
    "docstring": "storage() -> torch.TypedStorage Returns the underlying :class:. .. warning:: :class: is deprecated. It will be removed in the future, and :class: will be the only storage class. To access the :class: directly, use :attr:.",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:storage arg:self arguments arg If Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "segments_to_line_map",
    "source_code": "def segments_to_line_map(self, junctions: Tensor, segments: Tensor) -> Tensor:\n    num_junctions = len(junctions)\n    line_map = zeros([num_junctions, num_junctions], device=junctions.device)\n    _, idx_junc1 = where(torch.all(junctions[None] == segments[:, None, 0], dim=2))\n    _, idx_junc2 = where(torch.all(junctions[None] == segments[:, None, 1], dim=2))\n    line_map[idx_junc1, idx_junc2] = 1\n    line_map[idx_junc2, idx_junc1] = 1\n    return line_map",
    "docstring": "Convert the list of segments to line map.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\sold2\\sold2_detector.py",
    "ast_data": "FunctionDef name:segments_to_line_map arg:self arg:junctions arg:segments arguments arg arg arg Assign Call Assign Call Assign Call Call Compare Assign Call Call Compare Assign Assign Return return:yes"
  },
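  A minimal replay of the endpoint-matching logic with three junctions and one segment (a sketch of the same broadcasting trick used in the method):

```python
import torch

junctions = torch.tensor([[0., 0.], [1., 0.], [0., 1.]])  # (3, 2)
segments = torch.tensor([[[0., 0.], [1., 0.]]])           # (1, 2, 2): joins junctions 0 and 1
# Broadcast-compare each endpoint against every junction to recover its index.
idx1 = torch.where((junctions[None] == segments[:, None, 0]).all(dim=2))[1]
idx2 = torch.where((junctions[None] == segments[:, None, 1]).all(dim=2))[1]
line_map = torch.zeros(3, 3)
line_map[idx1, idx2] = 1
line_map[idx2, idx1] = 1  # symmetric adjacency
print(line_map)           # 1s at (0, 1) and (1, 0)
```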
  {
    "library": "scipy",
    "name": "sh_chebyt",
    "source_code": "def sh_chebyt(n, monic=False):\n    base = sh_jacobi(n, 0.0, 0.5, monic=monic)\n    if monic:\n        return base\n    if n > 0:\n        factor = 4 ** n / 2.0\n    else:\n        factor = 1.0\n    base._scale(factor)\n    return base",
    "docstring": "Shifted Chebyshev polynomial of the first kind. Defined as :math: for :math: the nth Chebyshev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- T : orthopoly1d Shifted Chebyshev polynomial of the first kind. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:sh_chebyt arg:n arg:monic arguments arg arg Assign Call If Return return:yes If Compare Assign Assign Call Return return:yes"
  },
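  The shift identity T*_n(x) = T_n(2x - 1) can be verified against `eval_chebyt`:

```python
import numpy as np
from scipy.special import sh_chebyt, eval_chebyt

T3 = sh_chebyt(3)              # shifted Chebyshev polynomial, degree 3
x = np.linspace(0.0, 1.0, 7)
# Shifted polynomials are plain Chebyshev polynomials on [0, 1] via x -> 2x - 1.
print(np.allclose(T3(x), eval_chebyt(3, 2 * x - 1)))  # True
```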
  {
    "library": "authlib",
    "name": "get_oauth_signature",
    "source_code": "def get_oauth_signature(self, method, uri, headers, body):\n    sign = self.SIGNATURE_METHODS.get(self.signature_method)\n    if not sign:\n        raise ValueError('Invalid signature method.')\n    request = OAuth1Request(method, uri, body=body, headers=headers)\n    return sign(self, request)",
    "docstring": "Get an OAuth signature to be used in signing a request. To satisfy _ item 2, if the request argument's headers dict attribute contains a Host item, its value will replace any netloc part of the request argument's uri attribute value. .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\client_auth.py",
    "ast_data": "FunctionDef name:get_oauth_signature arg:self arg:method arg:uri arg:headers arg:body arguments arg arg arg arg arg Assign Call If Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_iter_chunked",
    "source_code": "def _iter_chunked(x0, x1, chunksize=4, inc=1):\n    if inc == 0:\n        raise ValueError('Cannot increment by zero.')\n    if chunksize <= 0:\n        raise ValueError(f'Chunk size must be positive; got {chunksize}.')\n    s = 1 if inc > 0 else -1\n    stepsize = abs(chunksize * inc)\n    x = np.copy(x0)\n    while (x - x1) * inc < 0:\n        delta = min(stepsize, abs(x - x1))\n        step = delta * s\n        supp = np.arange(x, x + step, inc)\n        x += step\n        yield supp",
    "docstring": "Iterate from x0 to x1 in chunks of chunksize and steps inc. x0 must be finite, x1 need not be. In the latter case, the iterator is infinite. Handles both x0 x1. In the latter case, iterates downwards (make sure to set inc >> from scipy.stats._distn_infrastructure import _iter_chunked >>> [x for x in _iter_chunked(2, 5, inc=2)] [array([2, 4])] >>> [x for x in _iter_chunked(2, 11, inc=2)] [array([2, 4, 6, 8]), array([10])] >>> [x for x in _iter_chunked(2, -5, inc=-2)] [array([ 2, 0, -2, -4])] >>> [x for x in _iter_chunked(2, -9, inc=-2)] [array([ 2, 0, -2, -4]), array([-6, -8])]",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:_iter_chunked arg:x0 arg:x1 arg:chunksize arg:inc arguments arg arg arg arg If Compare Raise Call If Compare Raise Call Assign Compare Assign Call Assign Call While Compare Assign Call Call Assign Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_encoding_multiclass",
    "source_code": "def _fit_encoding_multiclass(self, X_ordinal, y, n_categories, target_mean):\n    n_features = self.n_features_in_\n    n_classes = len(self.classes_)\n    encodings = []\n    for i in range(n_classes):\n        y_class = y[:, i]\n        encoding = self._fit_encoding_binary_or_continuous(X_ordinal, y_class, n_categories, target_mean[i])\n        encodings.extend(encoding)\n    reorder_index = (idx for start in range(n_features) for idx in range(start, n_classes * n_features, n_features))\n    return [encodings[idx] for idx in reorder_index]",
    "docstring": "Learn multiclass encodings. Learn encodings for each class (c) then reorder encodings such that the same features (f) are grouped together. enables converting from: f0_c0, f1_c0, f0_c1, f1_c1, f0_c2, f1_c2 to: f0_c0, f0_c1, f0_c2, f1_c0, f1_c1, f1_c2",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py",
    "ast_data": "FunctionDef name:_fit_encoding_multiclass arg:self arg:X_ordinal arg:y arg:n_categories arg:target_mean arguments arg arg arg arg arg Assign Assign Call Assign For Call Assign Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "StatsForLoop",
    "source_code": "@dataclasses.dataclass\nclass StatsForLoop:\n    count_per_thread: int = 0\n    bytes_per_thread: int = 0\n\n    def __add__(self, other: typing.Self) -> StatsForLoop:\n        return StatsForLoop(count_per_thread=self.count_per_thread + other.count_per_thread, bytes_per_thread=self.bytes_per_thread + other.bytes_per_thread)",
    "docstring": "Memory usage stats for single loop in the generated kernel",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "ClassDef name:StatsForLoop FunctionDef name:__add__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_pt2_compile_substack",
    "source_code": "def get_pt2_compile_substack(self):\n    if hasattr(self.tls, 'pt2_compile_substack'):\n        return self.tls.pt2_compile_substack\n    else:\n        self.tls.pt2_compile_substack = []\n        return self.tls.pt2_compile_substack",
    "docstring": "A smaller subset of the main stack that gets used to log PT2 Compile Events internally.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:get_pt2_compile_substack arg:self arguments arg If Call Return return:yes Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "from_pretrained",
    "source_code": "@classmethod\ndef from_pretrained(cls, checkpoint: str='depth', device: Optional[torch.device]=None) -> DISK:\n    urls = {'depth': 'https://raw.githubusercontent.com/cvlab-epfl/disk/master/depth-save.pth', 'epipolar': 'https://raw.githubusercontent.com/cvlab-epfl/disk/master/epipolar-save.pth'}\n    if checkpoint not in urls:\n        raise ValueError(f'Unknown pretrained model: {checkpoint}')\n    if device is None:\n        device = torch.device('cpu')\n    pretrained_dict = torch.hub.load_state_dict_from_url(urls[checkpoint], map_location=device)\n    model: DISK = cls().to(device)\n    model.load_state_dict(pretrained_dict['extractor'])\n    model.eval()\n    return model",
    "docstring": "Load a pretrained model. Depth model was trained using depth map supervision and is slightly more precise but biased to detect keypoints only where SfM depth is available. Epipolar model was trained using epipolar geometry supervision and is less precise but detects keypoints everywhere where they are matchable. The difference is especially pronounced on thin structures and on edges of objects. Args: checkpoint: The checkpoint to load. One of 'depth' or 'epipolar'. device: The device to load the model to. Returns: The pretrained model.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\disk\\disk.py",
    "ast_data": "FunctionDef name:from_pretrained arg:cls arg:checkpoint arg:device arguments arg arg arg Assign If Compare Raise Call If Compare Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "normalize_newlines",
    "source_code": "@keep_lazy_text\ndef normalize_newlines(text):\n    return re_newlines.sub('\\n', str(text))",
    "docstring": "Normalize CRLF and CR newlines to just LF.",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:normalize_newlines arg:text arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_np_to_tnp_map",
    "source_code": "@functools.lru_cache(maxsize=1)\ndef get_np_to_tnp_map():\n    from ..utils import NP_TO_TNP_MODULE\n    np_fn_to_tnp_fn = {}\n    for np_mod, tnp_mod in NP_TO_TNP_MODULE.items():\n        for fn_name, tnp_fn in tnp_mod.__dict__.items():\n            if callable(tnp_fn):\n                if (np_fn := getattr(np_mod, fn_name, None)):\n                    np_fn_to_tnp_fn[np_fn] = tnp_fn\n    return np_fn_to_tnp_fn",
    "docstring": "This generates a mapping from numpy modules to their torch._numpy modules equivalents.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\misc.py",
    "ast_data": "FunctionDef name:get_np_to_tnp_map arguments Assign For Call For Call If Call If Call Assign Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "validate_registration_endpoint",
    "source_code": "def validate_registration_endpoint(self):\n    url = self.get('registration_endpoint')\n    if url and (not is_secure_transport(url)):\n        raise ValueError('\"registration_endpoint\" MUST use \"https\" scheme')",
    "docstring": "OPTIONAL. URL of the authorization server's OAuth 2.0 Dynamic Client Registration endpoint [RFC7591].",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py",
    "ast_data": "FunctionDef name:validate_registration_endpoint arg:self arguments arg Assign Call If BoolOp Call Raise Call"
  },
  {
    "library": "scrapy",
    "name": "set",
    "source_code": "def set(self, name: _SettingsKeyT, value: Any, priority: int | str='project') -> None:\n    self._assert_mutability()\n    priority = get_settings_priority(priority)\n    if name not in self:\n        if isinstance(value, SettingsAttribute):\n            self.attributes[name] = value\n        else:\n            self.attributes[name] = SettingsAttribute(value, priority)\n    else:\n        self.attributes[name].set(value, priority)",
    "docstring": "Store a key/value attribute with a given priority. Settings should be populated *before* configuring the Crawler object (through the :meth: method), otherwise they won't have any effect. :param name: the setting name :type name: str :param value: the value to associate with the setting :type value: object :param priority: the priority of the setting. Should be a key of :attr: or an integer :type priority: str or int",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:set arg:self arg:name arg:value arg:priority arguments arg arg arg arg Call Assign Call If Compare If Call Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "propagate_qconfig_",
    "source_code": "def propagate_qconfig_(module, qconfig_dict=None, prepare_custom_config_dict=None):\n    if qconfig_dict is None:\n        qconfig_dict = {}\n    if prepare_custom_config_dict is None:\n        prepare_custom_config_dict = {}\n    _propagate_qconfig_helper(module, qconfig_dict, prepare_custom_config_dict=prepare_custom_config_dict)",
    "docstring": "Propagate qconfig through the module hierarchy and assign attribute on each leaf module Args: module: input module qconfig_dict: dictionary that maps from name or type of submodule to quantization configuration, qconfig applies to all submodules of a given module unless qconfig for the submodules are specified (when the submodule already has qconfig attribute) prepare_custom_config_dict: dictionary for custom handling of modules see docs for :func: Return: None, module is modified inplace with qconfig attached",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py",
    "ast_data": "FunctionDef name:propagate_qconfig_ arg:module arg:qconfig_dict arg:prepare_custom_config_dict arguments arg arg arg If Compare Assign If Compare Assign Call"
  },
  {
    "library": "django",
    "name": "_check_filter_horizontal",
    "source_code": "def _check_filter_horizontal(self, obj):\n    if not isinstance(obj.filter_horizontal, (list, tuple)):\n        return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')\n    else:\n        return list(chain.from_iterable((self._check_filter_item(obj, field_name, 'filter_horizontal[%d]' % index) for index, field_name in enumerate(obj.filter_horizontal))))",
    "docstring": "Check that filter_horizontal is a sequence of field names.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_filter_horizontal arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_duplicate_body_captures_in_cond",
    "source_code": "def _duplicate_body_captures_in_cond(cond_graph, body_graph_captures):\n    types = [t.dtype.as_datatype_enum for t in body_graph_captures]\n    with cond_graph._c_graph.get() as c_graph:\n        placeholders = c_api.TF_CreatePlaceholders(c_graph, types, compat.as_str(_build_cond_placeholders_name_prefix(cond_graph)))\n    placeholder_ops = [ops.Operation._from_c_op(ph.oper, cond_graph) for ph in placeholders]\n    tensors = []\n    for op in placeholder_ops:\n        tensors.append(op.outputs[0])\n    tuples = zip(body_graph_captures, tensors)\n    keys = [id(t) for t in body_graph_captures]\n    for k, v in zip(keys, tuples):\n        cond_graph._function_captures.add_or_replace(key=k, external=v[0], internal=v[1], is_by_ref=False)\n    cond_graph.inputs.extend(tensors)",
    "docstring": "Creates placeholders for body captures in cond_graph. This is needed to match signatures of cond and body graphs. Args: cond_graph: cond branch graph body_graph_captures: Tensors which were captured when building the .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py",
    "ast_data": "FunctionDef name:_duplicate_body_captures_in_cond arg:cond_graph arg:body_graph_captures arguments arg arg Assign With Call Assign Call Call Call Assign Call Assign For Call Assign Call Assign Call For Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_or_validate_filenames_dataset",
    "source_code": "def _create_or_validate_filenames_dataset(filenames, name=None):\n    if isinstance(filenames, data_types.DatasetV2):\n        element_type = dataset_ops.get_legacy_output_types(filenames)\n        if element_type != dtypes.string:\n            raise TypeError(f'The `filenames` argument must contain `tf.string` elements. Got a dataset of `{element_type!r}` elements.')\n        element_shape = dataset_ops.get_legacy_output_shapes(filenames)\n        if not element_shape.is_compatible_with(tensor_shape.TensorShape([])):\n            raise TypeError(f'The `filenames` argument must contain `tf.string` elements of shape [] (i.e. scalars). Got a dataset of element shape {element_shape!r}.')\n    else:\n        filenames = nest.map_structure(_normalise_fspath, filenames)\n        filenames = ops.convert_to_tensor(filenames, dtype_hint=dtypes.string)\n        if filenames.dtype != dtypes.string:\n            raise TypeError(f'The `filenames` argument must contain `tf.string` elements. Got `{filenames.dtype!r}` elements.')\n        filenames = array_ops.reshape(filenames, [-1], name='flat_filenames')\n        filenames = from_tensor_slices_op._TensorSliceDataset(filenames, is_files=True, name=name)\n    return filenames",
    "docstring": "Creates (or validates) a dataset of filenames. Args: filenames: Either a list or dataset of filenames. If it is a list, it is convert to a dataset. If it is a dataset, its type and shape is validated. name: (Optional.) A name for the tf.data operation. Returns: A dataset of filenames.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "FunctionDef name:_create_or_validate_filenames_dataset arg:filenames arg:name arguments arg arg If Call Assign Call If Compare Raise Call Assign Call If Call Call Raise Call Assign Call Assign Call If Compare Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save_path",
    "source_code": "@property\ndef save_path(self):\n    return self._save_path",
    "docstring": "Return the save path used by the supervisor. Returns: A string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:save_path arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X, **params):\n    _raise_for_params(params, self, 'predict_log_proba')\n    check_is_fitted(self)\n    if hasattr(self.estimator_, 'predict_log_proba'):\n        X = validate_data(self, X, accept_sparse=['csr', 'csc'], dtype=None, ensure_all_finite=False, reset=False)\n        if _routing_enabled():\n            routed_params = process_routing(self, 'predict_log_proba', **params)\n        else:\n            routed_params = Bunch()\n            routed_params.estimator = Bunch(predict_log_proba=Bunch())\n        n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)\n        all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)((delayed(_parallel_predict_log_proba)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, self.n_classes_, params=routed_params.estimator.predict_log_proba) for i in range(n_jobs)))\n        log_proba = all_log_proba[0]\n        for j in range(1, len(all_log_proba)):\n            log_proba = np.logaddexp(log_proba, all_log_proba[j])\n        log_proba -= np.log(self.n_estimators)\n    else:\n        log_proba = np.log(self.predict_proba(X, **params))\n    return log_proba",
    "docstring": "Predict class log-probabilities for X. The predicted class log-probabilities of an input sample is computed as the log of the mean predicted class probabilities of the base estimators in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. **params : dict Parameters routed to the , the or the method of the sub-estimators via the metadata routing API. The routing is tried in the mentioned order depending on whether this method is available on the sub-estimator. .. versionadded:: 1.7 Only available if is set. See :ref: for more details. Returns ------- p : ndarray of shape (n_samples, n_classes) The class log-probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg arg Call Call If Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Call Call Assign For Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "sh_legendre",
    "source_code": "def sh_legendre(n, monic=False):\n    if n < 0:\n        raise ValueError('n must be nonnegative.')\n\n    def wfunc(x):\n        return 0.0 * x + 1.0\n    if n == 0:\n        return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic, lambda x: _ufuncs.eval_sh_legendre(n, x))\n    x, w = roots_sh_legendre(n)\n    hn = 1.0 / (2 * n + 1.0)\n    kn = _gam(2 * n + 1) / _gam(n + 1) ** 2\n    p = orthopoly1d(x, w, hn, kn, wfunc, limits=(0, 1), monic=monic, eval_func=lambda x: _ufuncs.eval_sh_legendre(n, x))\n    return p",
    "docstring": "Shifted Legendre polynomial. Defined as :math: for :math: the nth Legendre polynomial. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- P : orthopoly1d Shifted Legendre polynomial. Notes ----- The polynomials :math: are orthogonal over :math: with weight function 1.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:sh_legendre arg:n arg:monic arguments arg arg If Compare Raise Call FunctionDef name:wfunc arg:x arguments arg Return return:yes If Compare Return return:yes Call arguments arg Call Assign Call Assign Assign Call Call Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "aps02_f",
    "source_code": "def aps02_f(x):\n    ii = np.arange(1, 21)\n    return -2 * np.sum((2 * ii - 5) ** 2 / (x - ii ** 2) ** 3)",
    "docstring": "poles at x=n**2, 1st and 2nd derivatives at root are also close to 0",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps02_f arg:x arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "need_serialize",
    "source_code": "@classmethod\ndef need_serialize(cls) -> bool:\n    return len(cls._new_cache_artifacts) != 0",
    "docstring": "Have we seen new artifacts since last serialize call?",
    "type": "method",
    "file_path": "pytorch\\torch\\compiler\\_cache.py",
    "ast_data": "FunctionDef name:need_serialize arg:cls arguments arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_c_check_aliasing_constraint",
    "source_code": "def _c_check_aliasing_constraint(name, args, kwargs, result, get_module=lambda: '???'):\n    tuple_result = result\n    if not isinstance(result, tuple):\n        tuple_result = (result,)\n    if _C._any_output_is_alias_to_input_or_output(args, kwargs, tuple_result):\n        raise RuntimeError(f'{name} (with implementation in {get_module()}): The output of this custom operator (1) must not also be an input to this custom operator and (2) may not alias any inputs to this custom operator or other returns. The most common way to trigger this error is if we have y = custom_op(x) and y and x are the same Tensor. Please instead return a clone of the offending output tensor(s) (e.g. return x.clone()) or refactor the custom operator to not return y.')",
    "docstring": "custom operators' outputs must not have any aliases This version uses C++ implementation for perf. Only List container is supported. Tensors in Lists with not only Tensors are checked.",
    "type": "function",
    "file_path": "pytorch\\torch\\_library\\utils.py",
    "ast_data": "FunctionDef name:_c_check_aliasing_constraint arg:name arg:args arg:kwargs arg:result arg:get_module arguments arg arg arg arg arg arguments Assign If Call Assign If Call Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "rotation_matrix_to_axis_angle",
    "source_code": "def rotation_matrix_to_axis_angle(rotation_matrix: Tensor) -> Tensor:\n    if not isinstance(rotation_matrix, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(rotation_matrix)}')\n    if not rotation_matrix.shape[-2:] == (3, 3):\n        raise ValueError(f'Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}')\n    quaternion: Tensor = rotation_matrix_to_quaternion(rotation_matrix)\n    return quaternion_to_axis_angle(quaternion)",
    "docstring": "Convert 3x3 rotation matrix to Rodrigues vector in radians. Args: rotation_matrix: rotation matrix of shape :math:. Returns: Rodrigues vector transformation of shape :math:. Example: >>> input = tensor([[1., 0., 0.], ... [0., 1., 0.], ... [0., 0., 1.]]) >>> rotation_matrix_to_axis_angle(input) tensor([0., 0., 0.]) >>> input = tensor([[1., 0., 0.], ... [0., 0., -1.], ... [0., 1., 0.]]) >>> rotation_matrix_to_axis_angle(input) tensor([1.5708, 0.0000, 0.0000])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:rotation_matrix_to_axis_angle arg:rotation_matrix arguments arg If Call Raise Call Call If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_merge_box_list",
    "source_code": "def _merge_box_list(boxes: list[torch.Tensor], method: str='pad') -> tuple[torch.Tensor, list[int]]:\n    if not all((box.shape[-2:] == torch.Size([4, 2]) and box.dim() == 3 for box in boxes)):\n        raise TypeError(f'Input boxes must be a list of (N, 4, 2) shaped. Got: {[box.shape for box in boxes]}.')\n    if method == 'pad':\n        max_N = max((box.shape[0] for box in boxes))\n        stats = [max_N - box.shape[0] for box in boxes]\n        output = torch.nn.utils.rnn.pad_sequence(boxes, batch_first=True)\n    else:\n        raise NotImplementedError(f'`{method}` is not implemented.')\n    return (output, stats)",
    "docstring": "Merge a list of boxes into one tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:_merge_box_list arg:boxes arg:method arguments arg arg If Call BoolOp Compare Call Compare Call Raise Call If Compare Assign Call Assign Assign Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self) -> tensor_shape.TensorShape:\n    local_shape = self._values[0].shape\n    global_shape = local_shape.as_list()\n    global_shape[self.shard_dim] = global_shape[self.shard_dim] * len(self.values)\n    return tensor_shape.TensorShape(global_shape)",
    "docstring": "Returns the shape of the embedding variable for the current context.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_date_list_period",
    "source_code": "def get_date_list_period(self):\n    return self.date_list_period",
    "docstring": "Get the aggregation period for the list of dates: 'year', 'month', or 'day'.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_date_list_period arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_to_components",
    "source_code": "@abc.abstractmethod\ndef _to_components(self, value):\n    raise NotImplementedError('%s._to_components()' % type(self).__name__)",
    "docstring": "Encodes as a nested structure of or . Args: value: A value compatible with this . (Caller is responsible for ensuring compatibility.) Returns: A nested structure of or compatible with , which can be used to reconstruct .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:_to_components arg:self arg:value arguments arg arg Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "DenseOutput",
    "source_code": "class DenseOutput:\n\n    def __init__(self, t_old, t):\n        self.t_old = t_old\n        self.t = t\n        self.t_min = min(t, t_old)\n        self.t_max = max(t, t_old)\n\n    def __call__(self, t):\n        t = np.asarray(t)\n        if t.ndim > 1:\n            raise ValueError('`t` must be a float or a 1-D array.')\n        return self._call_impl(t)\n\n    def _call_impl(self, t):\n        raise NotImplementedError",
    "docstring": "Base class for local interpolant over step made by an ODE solver. It interpolates between and (see Attributes below). Evaluation outside this interval is not forbidden, but the accuracy is not guaranteed. Attributes ---------- t_min, t_max : float Time range of the interpolation.",
    "type": "class",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py",
    "ast_data": "ClassDef name:DenseOutput FunctionDef name:__init__ arg:self arg:t_old arg:t arguments arg arg arg Assign Assign Assign Call Assign Call FunctionDef name:__call__ arg:self arg:t arguments arg arg Assign Call If Compare Raise Call Return return:yes Call FunctionDef name:_call_impl arg:self arg:t arguments arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_delete_tmp_write_dir",
    "source_code": "def _delete_tmp_write_dir(self):\n    distributed_file_utils.remove_temp_dirpath(self.log_dir, self.model.distribute_strategy)",
    "docstring": "Deletes tmp write directories for multi-worker.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_delete_tmp_write_dir arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "call_module_maximum_supported_version",
    "source_code": "def call_module_maximum_supported_version():\n    return 10",
    "docstring": "Maximum version of XlaCallModule op supported. See versioning details documentation for the XlaCallModule op at:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:call_module_maximum_supported_version arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_erase_arg_defaults",
    "source_code": "def _erase_arg_defaults(self, node):\n    args = node.args\n    for i in range(len(args.defaults)):\n        args.defaults[i] = parser.parse_expression('None')\n    for i, d in enumerate(args.kw_defaults):\n        if d is not None:\n            args.kw_defaults[i] = parser.parse_expression('None')\n    return node",
    "docstring": "Erase arg default expressions, which would otherwise be unbound.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:_erase_arg_defaults arg:self arg:node arguments arg arg Assign For Call Call Assign Call For Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "unit_regular_star",
    "source_code": "@classmethod\ndef unit_regular_star(cls, numVertices, innerCircle=0.5):\n    if numVertices <= 16:\n        path = cls._unit_regular_stars.get((numVertices, innerCircle))\n    else:\n        path = None\n    if path is None:\n        ns2 = numVertices * 2\n        theta = 2 * np.pi / ns2 * np.arange(ns2 + 1)\n        theta += np.pi / 2.0\n        r = np.ones(ns2 + 1)\n        r[1::2] = innerCircle\n        verts = (r * np.vstack((np.cos(theta), np.sin(theta)))).T\n        path = cls(verts, closed=True, readonly=True)\n        if numVertices <= 16:\n            cls._unit_regular_stars[numVertices, innerCircle] = path\n    return path",
    "docstring": "Return a :class: for a unit regular star with the given numVertices and radius of 1.0, centered at (0, 0).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:unit_regular_star arg:cls arg:numVertices arg:innerCircle arguments arg arg arg If Compare Assign Call Assign If Compare Assign Assign Call Assign Call Assign Assign Call Call Call Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_sharding_grad",
    "source_code": "@ops.RegisterGradient('XlaSharding')\ndef _sharding_grad(op, grad):\n    sharding_attr = op.get_attr('sharding')\n    grad_sharding = gen_xla_ops.xla_sharding(grad, sharding=sharding_attr, unspecified_dims=op.get_attr('unspecified_dims'))\n    grad_sharding.op._set_attr('_XlaSharding', attr_value_pb2.AttrValue(s=sharding_attr))\n    return [grad_sharding]",
    "docstring": "Gradient for XlaSharding op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\tf2xla\\python\\xla.py",
    "ast_data": "FunctionDef name:_sharding_grad arg:op arg:grad arguments arg arg Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_argus_phi",
    "source_code": "def _argus_phi(chi):\n    return sc.gammainc(1.5, chi ** 2 / 2) / 2",
    "docstring": "Utility function for the argus distribution used in the pdf, sf and moment calculation. Note that for all x > 0: gammainc(1.5, x**2/2) = 2 * (_norm_cdf(x) - x * _norm_pdf(x) - 0.5). This can be verified directly by noting that the cdf of Gamma(1.5) can be written as erf(sqrt(x)) - 2*sqrt(x)*exp(-x)/sqrt(Pi). We use gammainc instead of the usual definition because it is more precise for small chi.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_argus_phi arg:chi arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LocalCellSource",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass LocalCellSource(Source):\n    local_name: str\n\n    def reconstruct(self, codegen: 'PyCodegen'):\n        codegen.append_output(codegen.create_load_closure(self.local_name))",
    "docstring": "Conceptually, this class is for cell objects implicitly generated by Python (e.g., captured variables).",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\source.py",
    "ast_data": "ClassDef name:LocalCellSource FunctionDef name:reconstruct arg:self arg:codegen arguments arg arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "mode",
    "source_code": "def mode(self, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    out = self._mode(dim, df, scale)\n    return _squeeze_output(out) if out is not None else out",
    "docstring": "Mode of the Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float or None The Mode of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mode arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "scrapy",
    "name": "maybe_deferred_to_future",
    "source_code": "def maybe_deferred_to_future(d: Deferred[_T]) -> Deferred[_T] | Future[_T]:\n    if not is_asyncio_reactor_installed():\n        return d\n    return deferred_to_future(d)",
    "docstring": ".. versionadded:: 2.6.0 Return *d* as an object that can be awaited from a :ref:. What you can await in Scrapy callables defined as coroutines depends on the value of :setting:: - When :ref:, you can only await on :class: objects. - When not using the asyncio reactor, you can only await on :class: objects. If you want to write code that uses `` objects:: class MySpider(Spider): ... async def parse(self, response): additional_request = scrapy.Request(' deferred = self.crawler.engine.download(additional_request) additional_response = await maybe_deferred_to_future(deferred)",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\defer.py",
    "ast_data": "FunctionDef name:maybe_deferred_to_future arg:d arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "_default_values",
    "source_code": "def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n    dashes: list[str | DashPattern] = ['-', (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1)]\n    p = 3\n    while len(dashes) < n:\n        a = itertools.combinations_with_replacement([3, 1.25], p)\n        b = itertools.combinations_with_replacement([4, 1], p)\n        segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n        for segments in segment_list:\n            gap = min(segments)\n            spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n            dashes.append(spec)\n        p += 1\n    return [self._get_dash_pattern(x) for x in dashes]",
    "docstring": "Build an arbitrarily long list of unique dash styles for lines. Parameters ---------- n : int Number of unique dash specs to generate. Returns ------- dashes : list of strings or tuples Valid arguments for the `matplotlib.lines.Line2D`), the remainder are sequences of long and short dashes.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_default_values arg:self arg:n arguments arg arg Assign While Compare Call Assign Call Assign Call Assign Call Call Call Call For Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "calc_ray_params",
    "source_code": "def calc_ray_params(self, cameras: PinholeCamera, num_img_rays: Tensor) -> None:\n    num_cams = cameras.batch_size\n    if num_cams != num_img_rays.shape[0]:\n        raise ValueError(f'Number of cameras {num_cams} does not match size of tensor to define number of rays to march from each camera {num_img_rays.shape[0]}')\n    points_2d_camera = self.sample_points_2d(cameras.height, cameras.width, num_img_rays)\n    self._calc_ray_params(cameras, points_2d_camera)",
    "docstring": "Calculate ray parameters: origins, directions. Also stored are camera ids for each ray, and its pixel coordinates. Args: cameras: scene cameras: PinholeCamera num_img_rays: tensor that holds the number of rays to randomly cast from each scene camera: int math: .",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:calc_ray_params arg:self arg:cameras arg:num_img_rays arguments arg arg arg Assign If Compare Raise Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, targetfig, toolfig):\n    self.figure = toolfig\n    self.targetfig = targetfig\n    toolfig.subplots_adjust(left=0.2, right=0.9)\n    toolfig.suptitle('Click on slider to adjust subplot param')\n    self._sliders = []\n    names = ['left', 'bottom', 'right', 'top', 'wspace', 'hspace']\n    for name, ax in zip(names, toolfig.subplots(len(names) + 1)):\n        ax.set_navigate(False)\n        slider = Slider(ax, name, 0, 1, valinit=getattr(targetfig.subplotpars, name))\n        slider.on_changed(self._on_slider_changed)\n        self._sliders.append(slider)\n    toolfig.axes[-1].remove()\n    self.sliderleft, self.sliderbottom, self.sliderright, self.slidertop, self.sliderwspace, self.sliderhspace = self._sliders\n    for slider in [self.sliderleft, self.sliderbottom, self.sliderwspace, self.sliderhspace]:\n        slider.closedmax = False\n    for slider in [self.sliderright, self.slidertop]:\n        slider.closedmin = False\n    self.sliderleft.slidermax = self.sliderright\n    self.sliderright.slidermin = self.sliderleft\n    self.sliderbottom.slidermax = self.slidertop\n    self.slidertop.slidermin = self.sliderbottom\n    bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])\n    self.buttonreset = Button(bax, 'Reset')\n    self.buttonreset.on_clicked(self._on_reset)",
    "docstring": "Parameters ---------- targetfig : The figure instance to adjust. toolfig : The figure instance to embed the subplot tool into.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:targetfig arg:toolfig arguments arg arg arg Assign Assign Call Call Assign Assign For Call Call Call Call Assign Call Call Call Call Call Assign For Assign For Assign Assign Assign Assign Assign Assign Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "cov",
    "source_code": "def cov(self, n, p):\n    n, p, npcond = self._process_parameters(n, p)\n    nn = n[..., np.newaxis, np.newaxis]\n    result = nn * np.einsum('...j,...k->...jk', -p, p)\n    for i in range(p.shape[-1]):\n        result[..., i, i] += n * p[..., i]\n    return self._checkresult(result, npcond, np.nan)",
    "docstring": "Covariance matrix of the multinomial distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- cov : ndarray The covariance matrix of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:cov arg:self arg:n arg:p arguments arg arg arg Assign Call Assign Assign Call For Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_tasks",
    "source_code": "@property\ndef num_tasks(self):\n    return self._device_coordinates.shape[0]",
    "docstring": "Returns the number of TensorFlow tasks in the TPU slice.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:num_tasks arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "p_max",
    "source_code": "def p_max(self, n: int) -> int:\n    return self._post_padding(n)[1]",
    "docstring": "Index of first non-overlapping upper time slice for sample input. Note that center point t[p_max] = (p_max(n)-1) * is typically larger than last time index t[n-1] == (-1) * . The upper border of samples indexes covered by the window slices is given by . Furthermore, does not denote the number of slices since is typically less than zero. A detailed example is provided in the :ref: section of the :ref:. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_min: The smallest possible slice index. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:p_max arg:self arg:n arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_image_magnification",
    "source_code": "def get_image_magnification(self):\n    return self.image_magnification",
    "docstring": "Get the factor by which to magnify images passed to draw_image. Allows a backend to have images at a different resolution to other artists.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py",
    "ast_data": "FunctionDef name:get_image_magnification arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "efficientvit_backbone_b2",
    "source_code": "def efficientvit_backbone_b2(**kwargs: dict[str, Any]) -> EfficientViTBackbone:\n    backbone = EfficientViTBackbone(width_list=[24, 48, 96, 192, 384], depth_list=[1, 3, 4, 4, 6], dim=32, **build_kwargs_from_config(kwargs, EfficientViTBackbone))\n    return backbone",
    "docstring": "Create EfficientViT B2.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\backbone.py",
    "ast_data": "FunctionDef name:efficientvit_backbone_b2 arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_height_ratios",
    "source_code": "def get_height_ratios(self):\n    return self._row_height_ratios",
    "docstring": "Return the height ratios. This is *None* if no height ratios have been set explicitly.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py",
    "ast_data": "FunctionDef name:get_height_ratios arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "equal",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef equal(x, y):\n    return math_ops.equal(x, y)",
    "docstring": "Element-wise equality between two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:equal arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "with_values",
    "source_code": "def with_values(self, new_values):\n    new_values = _convert_to_ragged_tensor_values(new_values)\n    new_values.shape.with_rank_at_least(1)\n    self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])\n    if isinstance(new_values, RaggedTensor) and self._row_partition.dtype != new_values.row_splits.dtype:\n        if not ragged_config.auto_cast_partition_dtype():\n            raise ValueError('self and new_values have mismatched row_splits dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.')\n        new_values = new_values.with_row_splits_dtype(dtypes.int64)\n        return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)\n    return RaggedTensor(values=new_values, row_partition=self._row_partition, internal=True)",
    "docstring": "Returns a copy of with replaced by . Preserves cached row-partitioning tensors such as and if they have values. Args: new_values: Potentially ragged tensor to use as the for the returned . Must have , and must have the same number of rows as . Returns: A . .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:with_values arg:self arg:new_values arguments arg arg Assign Call Call Call If BoolOp Call Compare If Call Raise Call Assign Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "itemsize",
    "source_code": "def itemsize(self):\n    return self._size",
    "docstring": "Return the itemsize of the variable. Returns ------- itemsize : int The element size of the variable (e.g., 8 for float64).",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:itemsize arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, input_gen):\n    self.input_gen = input_gen",
    "docstring": "Creates a representative dataset. Args: input_gen: A generator function that generates input samples for the model and has the same order, type and shape as the inputs to the model. Usually, this is a small subset of a few hundred samples randomly chosen, in no particular order, from the training or evaluation dataset.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:input_gen arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "mark_first_last_usage",
    "source_code": "def mark_first_last_usage(self, lines):\n    seen = OrderedSet[AllocationPool]()\n    for line in lines:\n        if isinstance(line, AllocFromPoolLine):\n            assert line.group.allocation\n            pool = line.group.allocation.pool\n            assert pool is not None\n            if pool not in seen:\n                line.is_first_pool_usage = True\n                seen.add(pool)\n    seen = OrderedSet[AllocationPool]()\n    for line in reversed(lines):\n        if isinstance(line, DeallocFromPoolLine):\n            assert line.group.allocation\n            pool = line.group.allocation.pool\n            assert pool is not None\n            if pool not in seen:\n                line.is_last_pool_usage = pool.root.get_live_ranges().end <= line.timestep\n                seen.add(pool)",
    "docstring": "Populate the AllocFromPoolLine.is_first_pool_usage and DeallocFromPoolLine.is_last_pool_usage fields so that pools are created/destroyed.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:mark_first_last_usage arg:self arg:lines arguments arg arg Assign Call For If Call Assign Compare If Compare Assign Call Assign Call For Call If Call Assign Compare If Compare Assign Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "path_to_string",
    "source_code": "def path_to_string(path):\n    if isinstance(path, os.PathLike):\n        return os.fspath(path)\n    return path",
    "docstring": "Convert objects to their string representation. If given a non-string typed path object, converts it to its string representation. If the object passed to is not among the above, then it is returned unchanged. This allows e.g. passthrough of file objects through this function. Args: path: object that represents a path Returns: A string representation of the path argument, if Python support exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\io_utils.py",
    "ast_data": "FunctionDef name:path_to_string arg:path arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_adjust_index",
    "source_code": "def _adjust_index(index, thresholds, offsets):\n    t_index = array_ops.shape(array_ops.boolean_mask(thresholds, math_ops.less_equal(thresholds, index)))[0] - 1\n    return index + array_ops.gather(offsets, t_index)",
    "docstring": "Adjusts index to account for elements to be skipped.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\shuffle_ops.py",
    "ast_data": "FunctionDef name:_adjust_index arg:index arg:thresholds arg:offsets arguments arg arg arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_call_end_callbacks_on_future",
    "source_code": "def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:\n    if not self.run_callbacks_on_exit:\n        raise RuntimeError('_call_end_callbacks_on_future can only be called once.')\n    self.run_callbacks_on_exit = False\n    record = self.record\n    assert record is not None\n    if not torch.jit.is_scripting():\n        with torch._C.DisableTorchFunctionSubclass():\n            profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut._RecordFunction(record, fut)\n    else:\n        profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)\n    return profiled_future",
    "docstring": "Use for profiling async calls that return a future. Calling this function will extend recording beyond this scope, until the future is satisfied. It is useful for profiling the end to end time of asynchronous calls. This function should only be called once to attach the callback onto the future, and will throw if called multiple times. Args: fut: (torch._C.Future): future for which to schedule callback for. Returns: A future that completes with the value of the passed in future when the profiling callbacks have ran.",
    "type": "method",
    "file_path": "pytorch\\torch\\autograd\\profiler.py",
    "ast_data": "FunctionDef name:_call_end_callbacks_on_future arg:self arg:fut arguments arg arg If Raise Call Assign Assign Compare If Call With Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_common_unshard_pre_state_dict_hook",
    "source_code": "def _common_unshard_pre_state_dict_hook(module: nn.Module, fsdp_state: _FSDPState, offload_to_cpu: bool, rank0_only: bool) -> None:\n    if not _should_unshard_params(fsdp_state):\n        return\n    _enter_unshard_params_ctx(module, fsdp_state, writeback=False, offload_to_cpu=offload_to_cpu, rank0_only=rank0_only)",
    "docstring": "Performs the pre-state_dict tasks shared by all state_dict types that require ``. FULL_STATE_DICT and SHARDED_STATE_DICT use this hook.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_common_unshard_pre_state_dict_hook arg:module arg:fsdp_state arg:offload_to_cpu arg:rank0_only arguments arg arg arg arg If Call Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "clone_tensor",
    "source_code": "def clone_tensor(x):\n    y = x.clone().requires_grad_(x.requires_grad)\n    if x.is_leaf and x.grad is not None:\n        y.grad = x.grad.clone()\n    return y",
    "docstring": "Clone the tensor and its gradient",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:clone_tensor arg:x arguments arg Assign Call Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Optional",
    "source_code": "@tf_export('experimental.Optional', 'data.experimental.Optional')\n@deprecation.deprecated_endpoints('data.experimental.Optional')\nclass Optional(composite_tensor.CompositeTensor, metaclass=abc.ABCMeta):\n\n    @abc.abstractmethod\n    def has_value(self, name=None):\n        raise NotImplementedError('Optional.has_value()')\n\n    @abc.abstractmethod\n    def get_value(self, name=None):\n        raise NotImplementedError('Optional.get_value()')\n\n    @abc.abstractproperty\n    def element_spec(self):\n        raise NotImplementedError('Optional.element_spec')\n\n    @staticmethod\n    def empty(element_spec):\n        return _OptionalImpl(gen_optional_ops.optional_none(), element_spec)\n\n    @staticmethod\n    def from_value(value):\n        with ops.name_scope('optional') as scope:\n            with ops.name_scope('value'):\n                element_spec = structure.type_spec_from_value(value)\n                encoded_value = structure.to_tensor_list(element_spec, value)\n        return _OptionalImpl(gen_optional_ops.optional_from_value(encoded_value, name=scope), element_spec)",
    "docstring": "Represents a value that may or may not be present. A can represent the result of an operation that may fail as a value, rather than raising an exception and halting execution. For example, returns a that either contains the next element of an iterator if one exists, or an \"empty\" value that indicates the end of the sequence has been reached. can only be used with values that are convertible to or . One can create a from a value using the method: >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) >>> print(optional.get_value()) tf.Tensor(42, shape=(), dtype=int32) or without a value using the method: >>> optional = tf.experimental.Optional.empty( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None)) >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py",
    "ast_data": "ClassDef name:Optional FunctionDef name:has_value arg:self arg:name arguments arg arg Raise Call FunctionDef name:get_value arg:self arg:name arguments arg arg Raise Call FunctionDef name:element_spec arg:self arguments arg Raise Call FunctionDef name:empty arg:element_spec arguments arg Return return:yes Call Call FunctionDef name:from_value arg:value arguments arg With Call With Call Assign Call Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "cryptography",
    "name": "get_public",
    "source_code": "def get_public(self, data: memoryview) -> tuple[tuple[memoryview], memoryview]:\n    point, data = _get_sshstr(data)\n    return ((point,), data)",
    "docstring": "Ed25519 public fields",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:get_public arg:self arg:data arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "monitor",
    "source_code": "@tf_export('profiler.experimental.client.monitor', v1=[])\ndef monitor(service_addr, duration_ms, level=1):\n    return _pywrap_profiler_plugin.monitor(_strip_prefix(service_addr, _GRPC_PREFIX), duration_ms, level, True)",
    "docstring": "Sends grpc requests to profiler server to perform on-demand monitoring. The monitoring result is a light weight performance summary of your model execution. This method will block the caller thread until it receives the monitoring result. This method currently supports Cloud TPU only. Args: service_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466. duration_ms: Duration of monitoring in ms. level: Choose a monitoring level between 1 and 2 to monitor your job. Level 2 is more verbose than level 1 and shows more metrics. Returns: A string of monitoring output. Example usage:",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_client.py",
    "ast_data": "FunctionDef name:monitor arg:service_addr arg:duration_ms arg:level arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "smart_bytes",
    "source_code": "def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):\n    if isinstance(s, Promise):\n        return s\n    return force_bytes(s, encoding, strings_only, errors)",
    "docstring": "Return a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects.",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:smart_bytes arg:s arg:encoding arg:strings_only arg:errors arguments arg arg arg arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_transpose_if_necessary",
    "source_code": "def _transpose_if_necessary(tensor, perm):\n    if perm != list(range(len(perm))):\n        return array_ops.transpose(tensor, perm=perm)\n    else:\n        return tensor",
    "docstring": "Like transpose(), but avoids creating a new tensor if possible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py",
    "ast_data": "FunctionDef name:_transpose_if_necessary arg:tensor arg:perm arguments arg arg If Compare Call Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_ConvTransposeNd",
    "source_code": "class _ConvTransposeNd(_ConvNd, torch.nn.modules.conv._ConvTransposeNd):\n\n    @staticmethod\n    def from_float(cls, float_conv, weight_qparams):\n        qref_conv = cls(float_conv.in_channels, float_conv.out_channels, float_conv.kernel_size, float_conv.stride, float_conv.padding, float_conv.output_padding, float_conv.groups, float_conv.bias is not None, float_conv.dilation, float_conv.padding_mode, device=float_conv.weight.device, dtype=float_conv.weight.dtype, weight_qparams=weight_qparams)\n        qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())\n        if float_conv.bias is not None:\n            qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())\n        return qref_conv",
    "docstring": "A reference version of nn.quantized.ConvTranspose2d we will not pack the parameters in this module, since weight packing is an optimization for quantized backends supported in PyTorch (fbgemm/qnnpack), this is useful when user want to use this module in other backends like Glow.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py",
    "ast_data": "ClassDef name:_ConvTransposeNd FunctionDef name:from_float arg:cls arg:float_conv arg:weight_qparams arguments arg arg arg Assign Call Compare Assign Call Call If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Convertible",
    "source_code": "class _Convertible(object):\n\n    def __init__(self, enclosing_graph):\n        self._enclosing_graph = enclosing_graph\n        self._outgoing_edges = []\n        self._converted_self = None\n\n    def converted_self(self):\n        raise NotImplementedError\n\n    def convert_variable_to_constant(self, incoming_edge, tensor_data):\n        raise NotImplementedError\n\n    def create_edges(self):\n        raise NotImplementedError\n\n    def add_outgoing_edge(self, edge):\n        self._outgoing_edges.append(edge)\n\n    @property\n    def converted_enclosing_graph(self):\n        return self._enclosing_graph.converted_self()\n\n    @property\n    def outgoing_edges(self):\n        return self._outgoing_edges",
    "docstring": "An entity that can have variables converted to constants.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "ClassDef name:_Convertible FunctionDef name:__init__ arg:self arg:enclosing_graph arguments arg arg Assign Assign Assign FunctionDef name:converted_self arg:self arguments arg Raise FunctionDef name:convert_variable_to_constant arg:self arg:incoming_edge arg:tensor_data arguments arg arg arg Raise FunctionDef name:create_edges arg:self arguments arg Raise FunctionDef name:add_outgoing_edge arg:self arg:edge arguments arg arg Call FunctionDef name:converted_enclosing_graph arg:self arguments arg Return return:yes Call FunctionDef name:outgoing_edges arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_supported_filetypes",
    "source_code": "@classmethod\ndef get_supported_filetypes(cls):\n    return cls.filetypes",
    "docstring": "Return dict of savefig file formats supported by this backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:get_supported_filetypes arg:cls arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_load_op",
    "source_code": "def _load_op(self, arg: Union[onnx.ModelProto, str], cache_dir: Optional[str]=None) -> onnx.ModelProto:\n    if isinstance(arg, str):\n        return kornia.onnx.utils.ONNXLoader.load_model(arg, cache_dir=cache_dir)\n    if isinstance(arg, onnx.ModelProto):\n        return arg\n    raise ValueError(f'Invalid argument type. Got {type(arg)}')",
    "docstring": "Load an ONNX model, either from a file path or use the provided ONNX ModelProto. Args: arg: Either an ONNX ModelProto object or a file path to an ONNX model. cache_dir: Where to read onnx objects from if stored on disk. Returns: onnx.ModelProto: The loaded ONNX model.",
    "type": "method",
    "file_path": "kornia\\kornia\\core\\mixin\\onnx.py",
    "ast_data": "FunctionDef name:_load_op arg:self arg:arg arg:cache_dir arguments arg arg arg If Call Return return:yes Call If Call Return return:yes Raise Call Call"
  },
  {
    "library": "scipy",
    "name": "HessianLinearOperator",
    "source_code": "class HessianLinearOperator:\n\n    def __init__(self, hessp, n):\n        self.hessp = hessp\n        self.n = n\n\n    def __call__(self, x, *args):\n\n        def matvec(p):\n            return self.hessp(x, p, *args)\n        return LinearOperator((self.n, self.n), matvec=matvec)",
    "docstring": "Build LinearOperator from hessp",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\minimize_trustregion_constr.py",
    "ast_data": "ClassDef name:HessianLinearOperator FunctionDef name:__init__ arg:self arg:hessp arg:n arguments arg arg arg Assign Assign FunctionDef name:__call__ arg:self arg:x arguments arg arg arg FunctionDef name:matvec arg:p arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "module_to_dict",
    "source_code": "def module_to_dict(module, omittable=lambda k: k.startswith('_') or not k.isupper()):\n    return {k: repr(getattr(module, k)) for k in dir(module) if not omittable(k)}",
    "docstring": "Convert a module namespace to a Python dictionary.",
    "type": "function",
    "file_path": "django\\django\\core\\management\\commands\\diffsettings.py",
    "ast_data": "FunctionDef name:module_to_dict arg:module arg:omittable arguments arg arg arguments arg BoolOp Call Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_ragged_key",
    "source_code": "def _add_ragged_key(self, key, value_type, split_type):\n    if key in self.ragged_keys:\n        original_value_type = self.ragged_value_types[self.ragged_keys.index(key)]\n        original_split_type = self.ragged_split_types[self.ragged_keys.index(key)]\n        if original_value_type != value_type:\n            raise ValueError(f'Conflicting type {original_value_type} vs {value_type} for feature {key}.')\n        if original_split_type != split_type:\n            raise ValueError(f'Conflicting partition type {original_split_type} vs {split_type} for feature {key}.')\n    else:\n        self.ragged_keys.append(key)\n        self.ragged_value_types.append(value_type)\n        self.ragged_split_types.append(split_type)",
    "docstring": "Adds a ragged key & dtype, checking for duplicates.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "FunctionDef name:_add_ragged_key arg:self arg:key arg:value_type arg:split_type arguments arg arg arg arg If Compare Assign Call Assign Call If Compare Raise Call If Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_type_proto",
    "source_code": "@classmethod\ndef experimental_type_proto(cls) -> Type[tensor_shape_pb2.TensorShapeProto]:\n    return tensor_shape_pb2.TensorShapeProto",
    "docstring": "Returns the type of proto associated with TensorShape serialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "parse_ranges",
    "source_code": "def parse_ranges(range_string):\n    range_string = range_string.strip()\n    if not range_string:\n        return []\n    if 'inf' in range_string:\n        range_string = re.sub('inf', repr(sys.float_info.max), range_string)\n    ranges = ast.literal_eval(range_string)\n    if isinstance(ranges, list) and (not isinstance(ranges[0], list)):\n        ranges = [ranges]\n    for item in ranges:\n        if len(item) != 2:\n            raise ValueError('Incorrect number of elements in range')\n        elif not isinstance(item[0], (int, float)):\n            raise ValueError('Incorrect type in the 1st element of range: %s' % type(item[0]))\n        elif not isinstance(item[1], (int, float)):\n            raise ValueError('Incorrect type in the 2nd element of range: %s' % type(item[0]))\n    return ranges",
    "docstring": "Parse a string representing numerical range(s). Args: range_string: (str) A string representing a numerical range or a list of them. For example: \"[-1.0,1.0]\", \"[-inf, 0]\", \"[[-inf, -1.0], [1.0, inf]]\" Returns: (list of list of float) A list of numerical ranges parsed from the input string. Raises: ValueError: If the input doesn't represent a range or a list of ranges.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_ranges arg:range_string arguments arg Assign Call If Return return:no If Compare Assign Call Call Assign Call If BoolOp Call Call Assign For If Compare Call Raise Call If Call Raise Call Call If Call Raise Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "optionxform",
    "source_code": "def optionxform(self, optionstr):\n    return optionstr",
    "docstring": "Keep the option names unedited.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:optionxform arg:self arg:optionstr arguments arg arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Problem02",
    "source_code": "class Problem02(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(2.7, 7.5)]\n        self.global_optimum = 5.145735\n        self.fglob = -1.899599\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return sin(x) + sin(10.0 / 3.0 * x)",
    "docstring": "Univariate Problem02 objective function. This class defines the Univariate Problem02 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem02}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) Bound constraints: :math: .. figure:: figures/Problem02.png :alt: Univariate Problem02 function :align: center **Univariate Problem02 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem02 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "embedding_lookup",
    "source_code": "def embedding_lookup(self, features: Any, weights: Optional[Any]=None) -> Tuple[Any, Dict[str, PartitionedCsrFormatTensor]]:\n    if not self._built:\n        self._maybe_build()\n    context = EmbeddingPipeliningContext(_PIPELINE_MODE_FORWARD, self._pipelining)\n    context.Enter()\n    partitioned_tensors = self.enqueue(features, weights)\n    context.Exit()\n    result = self.dequeue(partitioned_tensors)\n    return result",
    "docstring": "Perform embedding lookup on the input feature. Args: features: A nested structure of s, s or s, with the same structure as . Inputs will be downcast to . Only one type out of or is supported per call. weights: If not , a nested structure of s, s or s, matching the above, except that the tensors should be of float type (and they will be downcast to ). For s we assume the are the same for the parallel entries from and similarly for s we assume the row_splits are the same. Raises: ValueError: If the input feature is not one of the Tensor, SparseTensor or RaggedTensor type. TypeError: If the type of any sequence in does not match corresponding sequence in . Similarly for , if not . Returns: packed_activations: Embedding lookup results packed as the same sequence of the input feature. packed_output: A dict of PartitionedCsrFormatTensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py",
    "ast_data": "FunctionDef name:embedding_lookup arg:self arg:features arg:weights arguments arg arg arg If Call Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "unit_circle",
    "source_code": "@classmethod\ndef unit_circle(cls):\n    if cls._unit_circle is None:\n        cls._unit_circle = cls.circle(center=(0, 0), radius=1, readonly=True)\n    return cls._unit_circle",
    "docstring": "Return the readonly :class: of the unit circle. For most cases, :func: will be what you want.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:unit_circle arg:cls arguments arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_DivGrad",
    "source_code": "@ops.RegisterGradient('Div')\ndef _DivGrad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.inputs[1]\n    cx = math_ops.conj(x)\n    cy = math_ops.conj(y)\n    gx = math_ops.divide(grad, cy)\n    gy = grad * math_ops.divide(math_ops.divide(-cx, cy), cy)\n    return _ReduceGradientArgs(x, y, gx, gy)",
    "docstring": "The gradient for the Div operator.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_DivGrad arg:op arg:grad arguments arg arg Assign Assign Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_gaussian_covariances_diag",
    "source_code": "def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar):\n    avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]\n    avg_means2 = means ** 2\n    return avg_X2 - avg_means2 + reg_covar",
    "docstring": "Estimate the diagonal covariance vectors. Parameters ---------- responsibilities : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariances : array, shape (n_components, n_features) The covariance vector of the current components.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_gaussian_covariances_diag arg:resp arg:X arg:nk arg:means arg:reg_covar arguments arg arg arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_HistogramMseBruteforce",
    "source_code": "@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE)\nclass _HistogramMseBruteforce(_HistogramCalibrationAlgorithmBase):\n\n    def get_min_max_value(self) -> tuple[float, float]:\n        if self._num_bins > 512:\n            logging.warning('num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests all histogram mid value pairs, so it may take a long time.', self._num_bins)\n        mse_min = (float('inf'), float('inf'), float('inf'))\n        for left, right in itertools.combinations(range(self._num_bins), 2):\n            quant_min, quant_max = (self._hist_mids[left], self._hist_mids[right])\n            mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max)\n            mse_min = min(mse_tuple, mse_min)\n        min_value, max_value = (mse_min[1], mse_min[2])\n        return (min_value, max_value)",
    "docstring": "HistogramMseBruteforce for calculating min and max values of calibration result.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_HistogramMseBruteforce FunctionDef name:get_min_max_value arg:self arguments arg If Compare Call Assign Call Call Call For Call Call Assign Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "remove_internal",
    "source_code": "def remove_internal(self, sprite):\n    self._spritelist.remove(sprite)\n    old_rect = self.spritedict[sprite]\n    if old_rect is not self._init_rect:\n        self.lostsprites.append(old_rect)\n    if hasattr(sprite, 'rect'):\n        self.lostsprites.append(sprite.rect)\n    del self.spritedict[sprite]\n    del self._spritelayers[sprite]",
    "docstring": "Do not use this method directly. The group uses it to add a sprite.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:remove_internal arg:self arg:sprite arguments arg arg Call Assign If Compare Call If Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_sync_debug_mode",
    "source_code": "def set_sync_debug_mode(debug_mode: Union[int, str]) -> None:\n    _lazy_init()\n    if isinstance(debug_mode, str):\n        if debug_mode == 'default':\n            debug_mode = 0\n        elif debug_mode == 'warn':\n            debug_mode = 1\n        elif debug_mode == 'error':\n            debug_mode = 2\n        else:\n            raise RuntimeError('invalid value of debug_mode, expected one of `default`, `warn`, `error`')\n    torch._C._cuda_set_sync_debug_mode(debug_mode)",
    "docstring": "Set the debug mode for cuda synchronizing operations. Args: debug_mode(str or int): if \"default\" or 0, don't error or warn on synchronizing operations, if \"warn\" or 1, warn on synchronizing operations, if \"error\" or 2, error out synchronizing operations. Warning: This is an experimental feature, and not all synchronizing operations will trigger warning or error. In particular, operations in torch.distributed and torch.sparse namespaces are not covered yet.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:set_sync_debug_mode arg:debug_mode arguments arg Call If Call If Compare Assign If Compare Assign If Compare Assign Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "dynamic_shapes",
    "source_code": "def dynamic_shapes(self, m, args, kwargs=None):\n    dynamic_shapes, *other_dynamic_shapes = [_tree_map_with_path(lambda path, t: tuple(t.shape) if isinstance(t, torch.Tensor) else t, _combine_args(m, args, kwargs)) for args, kwargs in [(args, kwargs), *self._examples]]\n\n    def _mark_dynamism(v, *other_vs):\n        if not all((type(v) == type(other) for other in other_vs)):\n            raise ValueError(f'The following inputs were found to have differing types, so they cannot be marked as dynamic: {(v,) + other_vs}.')\n        if isinstance(v, int) and (not isinstance(v, bool)):\n            if all((other_v == v for other_v in other_vs)):\n                return None\n            else:\n                return Dim.DYNAMIC\n        else:\n            if not all((other_v == v for other_v in other_vs)):\n                raise ValueError(f'The following inputs were found to have differing values, but they cannot be marked as dynamic: {(v,) + other_vs}.')\n            return None\n    return tree_map(_mark_dynamism, dynamic_shapes, *other_dynamic_shapes, is_leaf=lambda i: type(i) is int)",
    "docstring": "Infers a :func: pytree structure by merging shapes of the original input :func: and :func: and of each additional input args and kwargs.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "FunctionDef name:dynamic_shapes arg:self arg:m arg:args arg:kwargs arguments arg arg arg arg Assign Call arguments arg arg Call Call Call FunctionDef name:_mark_dynamism arg:v arguments arg arg If Call Compare Call Call Raise Call If BoolOp Call Call If Call Compare Return return:no Return return:yes If Call Compare Raise Call Return return:no Return return:yes Call arguments arg Compare Call"
  },
  {
    "library": "pytorch",
    "name": "set_unbacked_var_to_val",
    "source_code": "@record_shapeenv_event()\ndef set_unbacked_var_to_val(self, k: sympy.Symbol, v: int) -> None:\n    log.info('set_unbacked_var_to_val %s = %s', k, v)\n    self.unbacked_var_to_val[k] = sympy.sympify(v)",
    "docstring": "Used only when propagate_real_tensors; registers a value for an unbacked symbol, which can be used last resort to resolve hints.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:set_unbacked_var_to_val arg:self arg:k arg:v arguments arg arg arg Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_use_rpc_pickler",
    "source_code": "@contextlib.contextmanager\ndef _use_rpc_pickler(rpc_pickler):\n    global _default_pickler\n    _default_pickler = rpc_pickler\n    try:\n        yield\n    finally:\n        _default_pickler = _internal_rpc_pickler",
    "docstring": "rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\rpc\\api.py",
    "ast_data": "FunctionDef name:_use_rpc_pickler arg:rpc_pickler arguments arg Assign Try Assign"
  },
  {
    "library": "tensorflow",
    "name": "_set_shape_list_attr",
    "source_code": "def _set_shape_list_attr(self, attr_name, shapes) -> None:\n    shapes = [s.as_proto() for s in shapes]\n    shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)\n    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))",
    "docstring": "Private method used to set a list(shape) attribute in the node_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_shape_list_attr arg:self arg:attr_name arg:shapes arguments arg arg arg Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "WorkerTimerArgs",
    "source_code": "@dataclasses.dataclass(frozen=True)\nclass WorkerTimerArgs:\n    stmt: str\n    setup: str = 'pass'\n    global_setup: str = ''\n    num_threads: int = 1\n    language: Language = Language.PYTHON",
    "docstring": "Container for Timer constructor arguments. This dataclass serves two roles. First, it is a simple interface for defining benchmarks. (See core.api.GroupedStmts and core.api.GroupedModules for the advanced interfaces.) Second, it provides serialization for controlling workers. is not pickleable, so instead the main process will pass instances to workers for processing.",
    "type": "class",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\worker\\main.py",
    "ast_data": "ClassDef name:WorkerTimerArgs Call"
  },
  {
    "library": "tensorflow",
    "name": "repeat_ranges",
    "source_code": "def repeat_ranges(params, splits, repeats):\n    splits_checks = [check_ops.assert_non_negative(splits, message=\"Input argument 'splits' must be non-negative\"), check_ops.assert_integer(splits, message=f\"Input argument 'splits' must be integer, but got {splits.dtype} instead\")]\n    repeats_checks = [check_ops.assert_non_negative(repeats, message=\"Input argument 'repeats' must be non-negative\"), check_ops.assert_integer(repeats, message=f\"Input argument 'repeats' must be integer, but got {repeats.dtype} instead\")]\n    splits = control_flow_ops.with_dependencies(splits_checks, splits)\n    repeats = control_flow_ops.with_dependencies(repeats_checks, repeats)\n    if repeats.shape.ndims != 0:\n        repeated_starts = repeat(splits[:-1], repeats, axis=0)\n        repeated_limits = repeat(splits[1:], repeats, axis=0)\n    else:\n        repeated_splits = repeat(splits, repeats, axis=0)\n        n_splits = array_ops.shape(repeated_splits, out_type=repeats.dtype)[0]\n        repeated_starts = repeated_splits[:n_splits - repeats]\n        repeated_limits = repeated_splits[repeats:]\n    one = array_ops.ones((), repeated_starts.dtype)\n    offsets = gen_ragged_math_ops.ragged_range(repeated_starts, repeated_limits, one)\n    return array_ops.gather(params, offsets.rt_dense_values)",
    "docstring": "Repeats each range of (as specified by ) times. Let the th range of be defined as . Then this function returns a tensor containing range 0 repeated times, followed by range 1 repeated , ..., followed by the last range repeated times. Args: params: The whose values should be repeated. splits: A splits tensor indicating the ranges of that should be repeated. Elements should be non-negative integers. repeats: The number of times each range should be repeated. Supports broadcasting from a scalar value. Elements should be non-negative integers. Returns: A with the same rank and type as . #### Example: >>> print(repeat_ranges( ... params=tf.constant(['a', 'b', 'c']), ... splits=tf.constant([0, 2, 3]), ... repeats=tf.constant(3))) tf.Tensor([b'a' b'b' b'a' b'b' b'a' b'b' b'c' b'c' b'c'], shape=(9,), dtype=string)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_util.py",
    "ast_data": "FunctionDef name:repeat_ranges arg:params arg:splits arg:repeats arguments arg arg arg Assign Call Call Assign Call Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Assign Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "compute_utilization",
    "source_code": "def compute_utilization(filename: str, total_length: float):\n    events = get_chrome_trace_events(filename)\n    global gpu_pids\n    gpu_pids = []\n    for event in events:\n        if 'name' not in event:\n            continue\n        if event['name'] == 'process_labels' and 'GPU' in event['args']['labels']:\n            gpu_pids.append(event['pid'])\n    total_length = total_length * 1000000.0\n    sorted_gpu_events = get_sorted_gpu_events(events)\n    utilization = get_duration(sorted_gpu_events) / total_length\n    sorted_gpu_mm_conv_events = get_sorted_gpu_mm_conv_events(events)\n    mm_conv_utilization = get_duration(sorted_gpu_mm_conv_events) / total_length\n    return (utilization, mm_conv_utilization)",
    "docstring": "Process the chrome traces outputs by the pytorch profiler to compute GPU Utilization and percent of times spent on matmul and convolution Args: filename(str): Name of chrome traces file produced by pytorch profiler total_length(float): total length of the process without profiler in second Return: tuple: (GPU Utilization, percent of time spent on matmul and convolution)",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\benchmark_utils.py",
    "ast_data": "FunctionDef name:compute_utilization arg:filename arg:total_length arguments arg arg Assign Call Assign For If Compare If BoolOp Compare Compare Call Assign Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "grad_state",
    "source_code": "@property\ndef grad_state(self):\n    return self._grad_state",
    "docstring": "The gradient loop state.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:grad_state arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_dot_dimension_numbers",
    "source_code": "def make_dot_dimension_numbers(dimension_numbers: DotDimensionNumbers | tuple[tuple[list[int], list[int]], tuple[list[int], list[int]]]) -> DotDimensionNumbers:\n    if isinstance(dimension_numbers, (list, tuple)):\n        (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers\n        dot_dims_proto = DotDimensionNumbers()\n        dot_dims_proto.lhs_contracting_dimensions.extend(lhs_contract)\n        dot_dims_proto.rhs_contracting_dimensions.extend(rhs_contract)\n        dot_dims_proto.lhs_batch_dimensions.extend(lhs_batch)\n        dot_dims_proto.rhs_batch_dimensions.extend(rhs_batch)\n        return dot_dims_proto\n    else:\n        return dimension_numbers",
    "docstring": "Builds a DotDimensionNumbers object from a specification. Args: dimension_numbers: either a or a nested tuple of lists of integers representing the dimensions to treat as contracting dimensions and batch dimensions on each input operand. Returns: A object.",
    "type": "function",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "FunctionDef name:make_dot_dimension_numbers arg:dimension_numbers arguments arg If Call Assign Assign Call Call Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "RArrow",
    "source_code": "@_register_style(_style_list)\nclass RArrow(LArrow):\n\n    def __call__(self, x0, y0, width, height, mutation_size):\n        p = BoxStyle.LArrow.__call__(self, x0, y0, width, height, mutation_size)\n        p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]\n        return p",
    "docstring": "A box in the shape of a right-pointing arrow.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:RArrow FunctionDef name:__call__ arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size arguments arg arg arg arg arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_range_tensor",
    "source_code": "def is_range_tensor(t):\n    return tensor_util.is_tf_type(t) and hasattr(t, 'op') and (t.op.type == 'Range')",
    "docstring": "Returns True if a tensor is the result of a tf.range op. Best effort.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\tensors.py",
    "ast_data": "FunctionDef name:is_range_tensor arg:t arguments arg Return return:yes BoolOp Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "replace_string_in_line",
    "source_code": "def replace_string_in_line(search, replace, filename):\n    with open(filename, 'r') as source:\n        content = source.read()\n    with open(filename, 'w') as source:\n        source.write(re.sub(search, replace, content))",
    "docstring": "Replace with sed when regex is required.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\ci_build\\update_version.py",
    "ast_data": "FunctionDef name:replace_string_in_line arg:search arg:replace arg:filename arguments arg arg arg With Call Assign Call With Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_namespace_and_device",
    "source_code": "def get_namespace_and_device(*array_list, remove_none=True, remove_types=(str,), xp=None):\n    skip_remove_kwargs = dict(remove_none=False, remove_types=[])\n    array_list = _remove_non_arrays(*array_list, remove_none=remove_none, remove_types=remove_types)\n    arrays_device = device(*array_list, **skip_remove_kwargs)\n    if xp is None:\n        xp, is_array_api = get_namespace(*array_list, **skip_remove_kwargs)\n    else:\n        xp, is_array_api = (xp, True)\n    if is_array_api:\n        return (xp, is_array_api, arrays_device)\n    else:\n        return (xp, False, arrays_device)",
    "docstring": "Combination into one single function of and . Parameters ---------- *array_list : array objects Array objects. remove_none : bool, default=True Whether to ignore None objects passed in arrays. remove_types : tuple or list, default=(str,) Types to ignore in the arrays. xp : module, default=None Precomputed array namespace module. When passed, typically from a caller that has already performed inspection of its own inputs, skips array namespace inspection. Returns ------- namespace : module Namespace shared by array objects. If any of the are not arrays, the namespace defaults to NumPy. is_array_api_compliant : bool True if the arrays are containers that implement the Array API spec. Always False when array_api_dispatch=False. device : device object (see the \"Device Support\" section of the array API spec).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py",
    "ast_data": "FunctionDef name:get_namespace_and_device arguments arg arg arg arg Assign Call Assign Call Assign Call If Compare Assign Call Assign If Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "save_counter",
    "source_code": "@property\ndef save_counter(self):\n    return self.checkpointer().save_counter",
    "docstring": "An integer variable numbering the checkpoint events. This is maintained by the underlying tf.train.Checkpoint object employed by AsyncCheckpoint class. The number starts at 0 and gets incremented for each checkpoint event. Returns: The save counter variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py",
    "ast_data": "FunctionDef name:save_counter arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "from_PreparedConstraint",
    "source_code": "@classmethod\ndef from_PreparedConstraint(cls, constraint):\n    lb, ub = constraint.bounds\n    cfun = constraint.fun\n    keep_feasible = constraint.keep_feasible\n    if np.all(lb == -np.inf) and np.all(ub == np.inf):\n        return cls.empty(cfun.n)\n    if np.all(lb == -np.inf) and np.all(ub == np.inf):\n        return cls.empty(cfun.n)\n    elif np.all(lb == ub):\n        return cls._equal_to_canonical(cfun, lb)\n    elif np.all(lb == -np.inf):\n        return cls._less_to_canonical(cfun, ub, keep_feasible)\n    elif np.all(ub == np.inf):\n        return cls._greater_to_canonical(cfun, lb, keep_feasible)\n    else:\n        return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)",
    "docstring": "Create an instance from object.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\canonical_constraint.py",
    "ast_data": "FunctionDef name:from_PreparedConstraint arg:cls arg:constraint arguments arg arg Assign Assign Assign If BoolOp Call Compare Call Compare Return return:yes Call If BoolOp Call Compare Call Compare Return return:yes Call If Call Compare Return return:yes Call If Call Compare Return return:yes Call If Call Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "add_config_value",
    "source_code": "def add_config_value(self, name: str, default: Any, rebuild: _ConfigRebuild, types: type | Collection[type] | ENUM=(), description: str='') -> None:\n    logger.debug('[app] adding config value: %r', (name, default, rebuild, types))\n    self.config.add(name=name, default=default, rebuild=rebuild, types=types, description=description)",
    "docstring": "Register a configuration value. This is necessary for Sphinx to recognize new values and set default values accordingly. :param name: The name of the configuration value. It is recommended to be prefixed with the extension name (ex. ``) to a string. However, booleans are still accepted and converted internally. .. versionadded:: 7.4 The *description* parameter.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_config_value arg:self arg:name arg:default arg:rebuild arg:types arg:description arguments arg arg arg arg arg arg Call Call"
  },
  {
    "library": "numpy",
    "name": "reduce",
    "source_code": "def reduce(self, target, axis=np._NoValue):\n    target = narray(target, copy=None, subok=True)\n    m = getmask(target)\n    if axis is np._NoValue and target.ndim > 1:\n        name = self.__name__\n        warnings.warn(f'In the future the default for ma.{name}.reduce will be axis=0, not the current None, to match np.{name}.reduce. Explicitly pass 0 or None to silence this warning.', MaskedArrayFutureWarning, stacklevel=2)\n        axis = None\n    if axis is not np._NoValue:\n        kwargs = {'axis': axis}\n    else:\n        kwargs = {}\n    if m is nomask:\n        t = self.f.reduce(target, **kwargs)\n    else:\n        target = target.filled(self.fill_value_func(target)).view(type(target))\n        t = self.f.reduce(target, **kwargs)\n        m = umath.logical_and.reduce(m, **kwargs)\n        if hasattr(t, '_mask'):\n            t._mask = m\n        elif m:\n            t = masked\n    return t",
    "docstring": "Reduce target along the given axis.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:reduce arg:self arg:target arg:axis arguments arg arg arg Assign Call Assign Call If BoolOp Compare Compare Assign Call Assign If Compare Assign Assign If Compare Assign Call Assign Call Call Call Call Assign Call Assign Call If Call Assign If Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_dpi",
    "source_code": "def set_dpi(self, val):\n    self.dpi = val\n    self.stale = True",
    "docstring": "Set the resolution of the figure in dots-per-inch. Parameters ---------- val : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:set_dpi arg:self arg:val arguments arg arg Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_ovo_binary",
    "source_code": "def _fit_ovo_binary(estimator, X, y, i, j, fit_params):\n    cond = np.logical_or(y == i, y == j)\n    y = y[cond]\n    y_binary = np.empty(y.shape, int)\n    y_binary[y == i] = 0\n    y_binary[y == j] = 1\n    indcond = np.arange(_num_samples(X))[cond]\n    fit_params_subset = _check_method_params(X, params=fit_params, indices=indcond)\n    return (_fit_binary(estimator, _safe_split(estimator, X, None, indices=indcond)[0], y_binary, fit_params=fit_params_subset, classes=[i, j]), indcond)",
    "docstring": "Fit a single binary estimator (one-vs-one).",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\multiclass.py",
    "ast_data": "FunctionDef name:_fit_ovo_binary arg:estimator arg:X arg:y arg:i arg:j arg:fit_params arguments arg arg arg arg arg arg Assign Call Compare Compare Assign Assign Call Assign Compare Assign Compare Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_stats",
    "source_code": "def _get_stats(self):\n    return _pywrap_dtensor_device.GetStats(context.context()._handle, self._device_info)",
    "docstring": "Returns the number of cache hit and miss for function compilation. Returns: A dictionary. 'miss': number of cache misses; 'hit': number of cache hits; and 'size': size of cache; miss count.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py",
    "ast_data": "FunctionDef name:_get_stats arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "random_intrinsics",
    "source_code": "def random_intrinsics(low: Union[float, Tensor], high: Union[float, Tensor]) -> Tensor:\n    sampler = torch.distributions.Uniform(low, high)\n    fx, fy, cx, cy = (sampler.sample(torch.Size((1,))) for _ in range(4))\n    zeros, ones = (zeros_like(fx), ones_like(fx))\n    camera_matrix = concatenate([fx, zeros, cx, zeros, fy, cy, zeros, zeros, ones])\n    return camera_matrix.view(1, 3, 3)",
    "docstring": "Generate a random camera matrix based on a given uniform distribution. Args: low: lower range (inclusive). high: upper range (exclusive). Returns: the random camera matrix with the shape of :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py",
    "ast_data": "FunctionDef name:random_intrinsics arg:low arg:high arguments arg arg Assign Call Assign Call Call Call Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "end",
    "source_code": "def end(self, session):\n    pass",
    "docstring": "Called at the end of session. The argument can be used in case the hook wants to run final ops, such as saving a last checkpoint. If raises exception other than OutOfRangeError or StopIteration then is not called. Note the difference between and behavior when raises OutOfRangeError or StopIteration. In that case is called but is not called. Args: session: A TensorFlow Session that will be soon closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "FunctionDef name:end arg:self arg:session arguments arg arg"
  },
  {
    "library": "sphinx",
    "name": "create_translator",
    "source_code": "def create_translator(self, *args: Any) -> nodes.NodeVisitor:\n    return self.env._registry.create_translator(self, *args)",
    "docstring": "Return an instance of translator. This method returns an instance of `` API.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:create_translator arg:self arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "minimum",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef minimum(x, y):\n    return math_ops.minimum(x, y)",
    "docstring": "Element-wise minimum of two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:minimum arg:x arg:y arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "obrientransform",
    "source_code": "def obrientransform(*args):\n    data = argstoarray(*args).T\n    v = data.var(axis=0, ddof=1)\n    m = data.mean(0)\n    n = data.count(0).astype(float)\n    data -= m\n    data **= 2\n    data *= (n - 1.5) * n\n    data -= 0.5 * v * (n - 1)\n    data /= (n - 1.0) * (n - 2.0)\n    if not ma.allclose(v, data.mean(0)):\n        raise ValueError('Lack of convergence in obrientransform.')\n    return data",
    "docstring": "Computes a transform on input data (any number of columns). Used to test for homogeneity of variance prior to running one-way stats. Each array in `f_oneway()` run on the transformed data and found significant, variances are unequal. From Maxwell and Delaney, p.112. Returns: transformed data for use in an ANOVA",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:obrientransform arguments arg Assign Call Assign Call Assign Call Assign Call Call If Call Call Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "sdpa_flop",
    "source_code": "@register_flop_formula([aten._scaled_dot_product_efficient_attention, aten._scaled_dot_product_flash_attention, aten._scaled_dot_product_cudnn_attention])\ndef sdpa_flop(query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int:\n    return sdpa_flop_count(query_shape, key_shape, value_shape)",
    "docstring": "Count flops for self-attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:sdpa_flop arg:query_shape arg:key_shape arg:value_shape arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "H",
    "source_code": "def H(self):\n    return '%02d' % self.data.hour",
    "docstring": "Hour, 24-hour format; i.e. '00' to '23'",
    "type": "method",
    "file_path": "django\\django\\utils\\dateformat.py",
    "ast_data": "FunctionDef name:H arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "preprocess_classes",
    "source_code": "def preprocess_classes(input: Tensor) -> Tensor:\n    return input",
    "docstring": "Preprocess input class tags.",
    "type": "function",
    "file_path": "kornia\\kornia\\augmentation\\utils\\helpers.py",
    "ast_data": "FunctionDef name:preprocess_classes arg:input arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_create_data",
    "source_code": "def _create_data(self, obj: NDFrameT, numeric_only: bool=False) -> NDFrameT:\n    if self.on is not None and (not isinstance(self.on, Index)) and (obj.ndim == 2):\n        obj = obj.reindex(columns=obj.columns.difference([self.on], sort=False))\n    if obj.ndim > 1 and numeric_only:\n        obj = self._make_numeric_only(obj)\n    return obj",
    "docstring": "Split data into blocks & return conformed data.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_create_data arg:self arg:obj arg:numeric_only arguments arg arg arg If BoolOp Compare Call Compare Assign Call Call If BoolOp Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_transform_input_tensor",
    "source_code": "def _transform_input_tensor(self, input_tensor):\n    if not input_tensor.dtype.is_integer:\n        raise ValueError('Invalid input, not integer. key: {} dtype: {}'.format(self.key, input_tensor.dtype))\n    values = input_tensor.values\n    if input_tensor.values.dtype != dtypes.int64:\n        values = math_ops.cast(values, dtypes.int64, name='values')\n    if self.default_value is not None:\n        values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')\n        num_buckets = math_ops.cast(self.num_buckets, dtypes.int64, name='num_buckets')\n        zero = math_ops.cast(0, dtypes.int64, name='zero')\n        values = array_ops.where_v2(math_ops.logical_or(values < zero, values >= num_buckets, name='out_of_range'), array_ops.fill(dims=array_ops.shape(values), value=math_ops.cast(self.default_value, dtypes.int64), name='default_values'), values)\n    return sparse_tensor_lib.SparseTensor(indices=input_tensor.indices, values=values, dense_shape=input_tensor.dense_shape)",
    "docstring": "Returns a SparseTensor with identity values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_transform_input_tensor arg:self arg:input_tensor arguments arg arg If Raise Call Call Assign If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Call Call Compare Compare Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "handle",
    "source_code": "@property\ndef handle(self):\n    return self._handle",
    "docstring": "The string representation of this handle.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:handle arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, rotation: Quaternion | So3, translation: Vector3 | Tensor) -> None:\n    super().__init__()\n    if not isinstance(rotation, (Quaternion, So3)):\n        raise TypeError(f'rotation type is {type(rotation)}')\n    if not isinstance(translation, (Vector3, Tensor)):\n        raise TypeError(f'translation type is {type(translation)}')\n    self._translation: Vector3 | Parameter\n    self._rotation: So3\n    if isinstance(translation, Tensor):\n        self._translation = Parameter(translation)\n    else:\n        self._translation = translation\n    if isinstance(rotation, Quaternion):\n        self._rotation = So3(rotation)\n    else:\n        self._rotation = rotation",
    "docstring": "Construct the base class. Internally represented by a unit quaternion and a translation 3-vector. Args: rotation: So3 group encompassing a rotation. translation: Vector3 or translation tensor with the shape of :math:. Example: >>> from kornia.geometry.quaternion import Quaternion >>> q = Quaternion.identity(batch_size=1) >>> s = Se3(q, torch.ones((1, 3))) >>> s.r Parameter containing: tensor([[1., 0., 0., 0.]], requires_grad=True) >>> s.t Parameter containing: tensor([[1., 1., 1.]], requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:rotation arg:translation arguments arg arg arg Call Call If Call Raise Call Call If Call Raise Call Call If Call Assign Call Assign If Call Assign Call Assign"
  },
  {
    "library": "pandas",
    "name": "keys",
    "source_code": "def keys(self, include: str='pandas') -> list[str]:\n    if include == 'pandas':\n        return [n._v_pathname for n in self.groups()]\n    elif include == 'native':\n        assert self._handle is not None\n        return [n._v_pathname for n in self._handle.walk_nodes('/', classname='Table')]\n    raise ValueError(f\"`include` should be either 'pandas' or 'native' but is '{include}'\")",
    "docstring": "Return a list of keys corresponding to objects stored in HDFStore. Parameters ---------- include : str, default 'pandas' When kind equals 'pandas' return pandas objects. When kind equals 'native' return native HDF5 Table objects. Returns ------- list List of ABSOLUTE path-names (e.g. have the leading '/'). Raises ------ raises ValueError if kind has an illegal value See Also -------- HDFStore.info : Prints detailed information on the store. HDFStore.get_node : Returns the node with the key. HDFStore.get_storer : Returns the storer object for a key. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"]) >>> store = pd.HDFStore(\"store.h5\", \"w\") # doctest: +SKIP >>> store.put(\"data\", df) # doctest: +SKIP >>> store.get(\"data\") # doctest: +SKIP >>> print(store.keys()) # doctest: +SKIP ['/data1', '/data2'] >>> store.close() # doctest: +SKIP",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:keys arg:self arg:include arguments arg arg If Compare Return return:yes Call If Compare Compare Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "to_pyval",
    "source_code": "def to_pyval(self):\n    if not self._is_eager():\n        raise ValueError('StructuredTensor.to_pyval() is only supported in eager mode.')\n    result = {}\n    for key, value in self._fields.items():\n        if isinstance(value, ops.EagerTensor):\n            value = value.numpy()\n        if isinstance(value, np.ndarray):\n            value = value.tolist()\n        elif isinstance(value, ragged_tensor.RaggedTensor):\n            value = value.to_list()\n        elif isinstance(value, StructuredTensor):\n            value = value.to_pyval()\n        result[key] = value\n    if len(self.shape) > 0:\n        if not result:\n            return _empty_dict_pylist_from_row_partitions(self.row_partitions, self.nrows())\n        return _pyval_field_major_to_node_major(list(result.keys()), list(result.values()), self.rank)\n    else:\n        return result",
    "docstring": "Returns this StructuredTensor as a nested Python dict or list of dicts. Converts this to a nested python value: * with are converted into a dictionary, with an entry for each field. Field names are used as keys and field values are converted to python values. In particular: * Scalar Tensor fields are converted to simple values (such as or or ) * Non-scalar Tensor fields and RaggedTensor fields are converted to nested lists of simple values. * StructuredTensor fields are converted recursively using . * with are converted to nested python s, containing one dictionary for each structure (where each structure's dictionary is defined as described above). Requires that all fields are Eager tensors. >>> tf.experimental.StructuredTensor.from_fields( ... {'a': [1, 2, 3]}, [3]).to_pyval() [{'a': 1}, {'a': 2}, {'a': 3}] Note that . Returns: A nested Python dict or list of dicts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:to_pyval arg:self arguments arg If Call Raise Call Assign For Call If Call Assign Call If Call Assign Call If Call Assign Call If Call Assign Call Assign If Compare Call If Return return:yes Call Call Return return:yes Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "MovePlaceholderToFront",
    "source_code": "class MovePlaceholderToFront(_pass.Transform):\n\n    def _run(self, *args, **kwargs) -> torch.fx.GraphModule:\n        graph_module = self.module\n        graph = graph_module.graph\n        placeholders = []\n        first_not_placeholder = None\n        for node in graph.nodes:\n            if node.op == 'placeholder':\n                placeholders.append(node)\n            if first_not_placeholder is None and node.op != 'placeholder':\n                first_not_placeholder = node\n        if first_not_placeholder is None:\n            return graph_module\n        for placeholder in placeholders:\n            first_not_placeholder.prepend(placeholder)\n        return graph_module",
    "docstring": "This pass move all placeholder nodes to the front of the graph node list. In torch.fx.Graph, placeholder is a special assignment node. If it's not executed in the beginning, it could overwrite values computed by upstream nodes.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\virtualization.py",
    "ast_data": "ClassDef name:MovePlaceholderToFront FunctionDef name:_run arg:self arguments arg arg arg Assign Assign Assign Assign For If Compare Call If BoolOp Compare Compare Assign If Compare Return return:yes For Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "TransformedPath",
    "source_code": "class TransformedPath(TransformNode):\n\n    def __init__(self, path, transform):\n        _api.check_isinstance(Transform, transform=transform)\n        super().__init__()\n        self._path = path\n        self._transform = transform\n        self.set_children(transform)\n        self._transformed_path = None\n        self._transformed_points = None\n\n    def _revalidate(self):\n        if self._invalid == self._INVALID_FULL or self._transformed_path is None:\n            self._transformed_path = self._transform.transform_path_non_affine(self._path)\n            self._transformed_points = Path._fast_from_codes_and_verts(self._transform.transform_non_affine(self._path.vertices), None, self._path)\n        self._invalid = 0\n\n    def get_transformed_points_and_affine(self):\n        self._revalidate()\n        return (self._transformed_points, self.get_affine())\n\n    def get_transformed_path_and_affine(self):\n        self._revalidate()\n        return (self._transformed_path, self.get_affine())\n\n    def get_fully_transformed_path(self):\n        self._revalidate()\n        return self._transform.transform_path_affine(self._transformed_path)\n\n    def get_affine(self):\n        return self._transform.get_affine()",
    "docstring": "A caches a non-affine transformed copy of the . This cached copy is automatically updated when the non-affine part of the transform changes. .. note:: Paths are considered immutable by this class. Any update to the path's vertices/codes will not trigger a transform recomputation.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "ClassDef name:TransformedPath FunctionDef name:__init__ arg:self arg:path arg:transform arguments arg arg arg Call Call Call Assign Assign Call Assign Assign FunctionDef name:_revalidate arg:self arguments arg If BoolOp Compare Compare Assign Call Assign Call Call Assign FunctionDef name:get_transformed_points_and_affine arg:self arguments arg Call Return return:yes Call FunctionDef name:get_transformed_path_and_affine arg:self arguments arg Call Return return:yes Call FunctionDef name:get_fully_transformed_path arg:self arguments arg Call Return return:yes Call FunctionDef name:get_affine arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_tensorflow_version_lines",
    "source_code": "def get_tensorflow_version_lines(include_dependency_versions=False):\n    lines = ['TensorFlow version: %s' % pywrap_tf_session.__version__]\n    lines.append('')\n    if include_dependency_versions:\n        lines.append('Dependency version(s):')\n        lines.append('  numpy: %s' % np.__version__)\n        lines.append('')\n    return RichTextLines(lines)",
    "docstring": "Generate RichTextLines with TensorFlow version info. Args: include_dependency_versions: Include the version of TensorFlow's key dependencies, such as numpy. Returns: A formatted, multi-line object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py",
    "ast_data": "FunctionDef name:get_tensorflow_version_lines arg:include_dependency_versions arguments arg Assign Call If Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_rewrite_tracepoint_node",
    "source_code": "def _rewrite_tracepoint_node(gm: torch.fx.GraphModule):\n    for node in gm.graph.nodes:\n        if node.target == torch.ops.higher_order._export_tracepoint:\n            if 'path' in node.kwargs:\n                path = _strip_root(node.kwargs['path'])\n                with gm.graph.inserting_before(node):\n                    new_node = gm.graph.create_node('call_function', torch.ops.higher_order._export_tracepoint, args=node.args, kwargs={'path': path, 'kind': node.kwargs['kind']})\n                    new_node.meta = node.meta\n                    node.replace_all_uses_with(new_node)\n                    gm.graph.erase_node(node)",
    "docstring": "In-place modifiy input graph module by replacing the export tracepoint with a new node that has the same target and args, but with the _export_root stripped from path.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:_rewrite_tracepoint_node arg:gm arguments arg For If Compare If Compare Assign Call With Call Assign Call Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "formatargspec",
    "source_code": "def formatargspec(args, varargs=None, varkw=None, defaults=None, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq):\n    specs = []\n    if defaults:\n        firstdefault = len(args) - len(defaults)\n    for i in range(len(args)):\n        spec = strseq(args[i], formatarg, join)\n        if defaults and i >= firstdefault:\n            spec = spec + formatvalue(defaults[i - firstdefault])\n        specs.append(spec)\n    if varargs is not None:\n        specs.append(formatvarargs(varargs))\n    if varkw is not None:\n        specs.append(formatvarkw(varkw))\n    return '(' + ', '.join(specs) + ')'",
    "docstring": "Format an argument spec from the 4 values returned by getargspec. The first four arguments are (args, varargs, varkw, defaults). The other four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments.",
    "type": "function",
    "file_path": "numpy\\numpy\\_utils\\_inspect.py",
    "ast_data": "FunctionDef name:formatargspec arg:args arg:varargs arg:varkw arg:defaults arg:formatarg arg:formatvarargs arg:formatvarkw arg:formatvalue arg:join arguments arg arg arg arg arg arg arg arg arg arguments arg arguments arg arguments arg Call Assign If Assign Call Call For Call Call Assign Call If BoolOp Compare Assign Call Call If Compare Call Call If Compare Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "rot_z",
    "source_code": "@classmethod\ndef rot_z(cls, z: Tensor) -> Se3:\n    zs = zeros_like(z)\n    return cls(So3.rot_z(z), stack((zs, zs, zs), -1))",
    "docstring": "Construct a z-axis rotation. Args: z: the z-axis rotation angle.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:rot_z arg:cls arg:z arguments arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_square",
    "source_code": "def is_square(operator_a, operator_b):\n    if operator_a.is_square and operator_b.is_square:\n        return True\n    if operator_a.is_square is False and operator_b.is_square is False:\n        m = operator_a.range_dimension\n        l = operator_b.domain_dimension\n        if m is not None and l is not None:\n            return m == l\n    if operator_a.is_square != operator_b.is_square and (operator_a.is_square is not None and operator_b.is_square is not None):\n        return False\n    return None",
    "docstring": "Return a hint to whether the composition is square.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\property_hint_util.py",
    "ast_data": "FunctionDef name:is_square arg:operator_a arg:operator_b arguments arg arg If BoolOp Return return:yes If BoolOp Compare Compare Assign Assign If BoolOp Compare Compare Return return:yes Compare If BoolOp Compare BoolOp Compare Compare Return return:yes Return return:no"
  },
  {
    "library": "scrapy",
    "name": "Settings",
    "source_code": "class Settings(BaseSettings):\n\n    def __init__(self, values: _SettingsInputT=None, priority: int | str='project'):\n        super().__init__()\n        self.setmodule(default_settings, 'default')\n        for name, val in self.items():\n            if isinstance(val, dict):\n                self.set(name, BaseSettings(val, 'default'), 'default')\n        self.update(values, priority)",
    "docstring": "This object stores Scrapy settings for the configuration of internal components, and can be used for any further customization. It is a direct subclass and supports all methods of :class:. Additionally, after instantiation of this class, the new object will have the global default settings described on :ref: already populated.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "ClassDef name:Settings FunctionDef name:__init__ arg:self arg:values arg:priority arguments arg arg arg Call Call Call For Call If Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_prob_original_static",
    "source_code": "def _get_prob_original_static(initial_dist_t, target_dist_t):\n    init_static = tensor_util.constant_value(initial_dist_t)\n    target_static = tensor_util.constant_value(target_dist_t)\n    if init_static is None or target_static is None:\n        return None\n    else:\n        return np.min(target_static / init_static)",
    "docstring": "Returns the static probability of sampling from the original. returns if it encounters an Op that it isn't defined for. We have some custom logic to avoid this. Args: initial_dist_t: A tensor of the initial distribution. target_dist_t: A tensor of the target distribution. Returns: The probability of sampling from the original distribution as a constant, if it is a constant, or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_get_prob_original_static arg:initial_dist_t arg:target_dist_t arguments arg arg Assign Call Assign Call If BoolOp Compare Compare Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tril",
    "source_code": "def _get_tril(self):\n    return array_ops.matrix_band_part(self._tril, -1, 0)",
    "docstring": "Gets the kwarg, with upper part zero-d out.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py",
    "ast_data": "FunctionDef name:_get_tril arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_constant_value",
    "source_code": "def get_constant_value(x: ir.IRNode) -> Optional[ir.Constant]:\n    if isinstance(x, ir.MutableBox):\n        return get_constant_value(x.data)\n    if isinstance(x, ir.BaseView):\n        return get_constant_value(x.unwrap_view())\n    if isinstance(x, ir.Constant):\n        return x\n    if not isinstance(x, ir.Loops):\n        return None\n    handler = torch._inductor.ops_handler.ExtractConstantsHandler(x.get_device())\n    with V.set_ops_handler(handler), patch.object(ir.FlexibleLayout, 'allow_indexing', True):\n        out = x.inner_fn(*x.inner_fn_args())\n    assert isinstance(out, torch._inductor.virtualized.OpsValue)\n    if isinstance(out.value, ir.Constant):\n        return out.value\n    return None",
    "docstring": "Try convert an arbitrary IR node into an ir.Constant value",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\lowering.py",
    "ast_data": "FunctionDef name:get_constant_value arg:x arguments arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes If Call Return return:no Assign Call Call With Call Call Assign Call Call Call If Call Return return:yes Return return:no"
  },
  {
    "library": "authlib",
    "name": "get_expires_in",
    "source_code": "def get_expires_in(self):\n    raise NotImplementedError()",
    "docstring": "A method to get the ``:: def get_expires_in(self): return self.expires_in :return: timestamp int",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_expires_in arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "true_dtype",
    "source_code": "@property\ndef true_dtype(self):\n    return self._variable.dtype",
    "docstring": "Deprecated alias of .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\autocast_variable.py",
    "ast_data": "FunctionDef name:true_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_worker_failure",
    "source_code": "def _is_worker_failure(error):\n    if _handle_graph_execution_error_as_worker_failure() and isinstance(error, errors.UnknownError) and ('Graph execution error' in str(error)):\n        logging.info(f'Handling {type(error)}: {str(error)} as worker failure.')\n        return True\n    if isinstance(error, (ClosureInputError, ClosureAbortedError)):\n        error = error.original_exception\n    if _JOB_WORKER_STRING_IDENTIFIER not in str(error):\n        return False\n    if _RPC_ERROR_FROM_PS in str(error):\n        return False\n    if isinstance(error, (errors.UnavailableError, errors.AbortedError)):\n        return True\n    if isinstance(error, errors.InvalidArgumentError):\n        if 'unknown device' in str(error).lower() or 'Primary device is not remote' in str(error) or 'Unable to find the relevant tensor remote_handle' in str(error):\n            return True\n    if isinstance(error, errors.NotFoundError):\n        if 'is neither a type of a primitive operation nor a name of a function registered' in str(error):\n            return True\n    if isinstance(error, errors.CancelledError):\n        return True\n    if isinstance(error, TypeError) and 'Binding inputs to tf.function' in str(error):\n        return True\n    return False",
    "docstring": "Whether the error is considered a worker failure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_is_worker_failure arg:error arguments arg If BoolOp Call Call Compare Call Call Call Call Return return:yes If Call Assign If Compare Call Return return:yes If Compare Call Return return:yes If Call Return return:yes If Call If BoolOp Compare Call Call Compare Call Compare Call Return return:yes If Call If Compare Call Return return:yes If Call Return return:yes If BoolOp Call Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "ObjType",
    "source_code": "class ObjType:\n    known_attrs = {'searchprio': 1}\n\n    def __init__(self, lname: str, /, *roles: Any, **attrs: Any) -> None:\n        self.lname: str = lname\n        self.roles: tuple[Any, ...] = roles\n        self.attrs: dict[str, Any] = self.known_attrs | attrs",
    "docstring": "An ObjType is the description for a type of object that a domain can document. In the object_types attribute of Domain subclasses, object type names are mapped to instances of this class. Constructor arguments: - *lname*: localized name of the type (do not include domain name) - *roles*: all the roles that can refer to an object of this type - *attrs*: object attributes -- currently only \"searchprio\" is known, which defines the object's priority in the full-text search index, see :meth:.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\domains\\__init__.py",
    "ast_data": "ClassDef name:ObjType Assign FunctionDef name:__init__ arguments arg arg arg arg"
  },
  {
    "library": "scipy",
    "name": "_cholesky_logdet",
    "source_code": "def _cholesky_logdet(self, scale):\n    c_decomp = scipy.linalg.cholesky(scale, lower=True)\n    logdet = 2 * np.sum(np.log(c_decomp.diagonal()))\n    return (c_decomp, logdet)",
    "docstring": "Compute Cholesky decomposition and determine (log(det(scale)). Parameters ---------- scale : ndarray Scale matrix. Returns ------- c_decomp : ndarray The Cholesky decomposition of . logdet : scalar The log of the determinant of . Notes ----- This computation of ``. It is ~2x faster though.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_cholesky_logdet arg:self arg:scale arguments arg arg Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_unique_failures",
    "source_code": "def get_unique_failures(self, jobs: list[Any]) -> dict[str, list[Any]]:\n    failures = defaultdict(list)\n    for job in jobs:\n        if job['conclusion'] == 'failure':\n            found_similar_failure = False\n            if 'failureCaptures' not in job:\n                failures['unclassified'] = [job]\n                continue\n            failureCaptures = ' '.join(job['failureCaptures'])\n            for failure in failures:\n                seq = SequenceMatcher(None, failureCaptures, failure)\n                if seq.ratio() > SIMILARITY_THRESHOLD:\n                    failures[failure].append(job)\n                    found_similar_failure = True\n                    break\n            if not found_similar_failure:\n                failures[failureCaptures] = [job]\n    return failures",
    "docstring": "Returns list of jobs grouped by failureCaptures from the input list",
    "type": "method",
    "file_path": "pytorch\\tools\\alerts\\create_alerts.py",
    "ast_data": "FunctionDef name:get_unique_failures arg:self arg:jobs arguments arg arg Assign Call For If Compare Assign If Compare Assign Assign Call For Assign Call If Compare Call Call Assign If Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "cx_right",
    "source_code": "@property\ndef cx_right(self) -> Tensor:\n    return self.rectified_right_camera[..., 0, 2]",
    "docstring": "Return the x-coordinate of the principal point for the right camera. Returns: tensor of shape :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py",
    "ast_data": "FunctionDef name:cx_right arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_source_files_event",
    "source_code": "def read_source_files_event(self, offset):\n    with self._reader_read_locks[self._source_files_path]:\n        proto_string = self._get_reader(self._source_files_path).read(offset)[0]\n    return debug_event_pb2.DebugEvent.FromString(proto_string)",
    "docstring": "Read a DebugEvent proto at given offset from the .source_files file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_source_files_event arg:self arg:offset arguments arg arg With Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_compute_divided_differences",
    "source_code": "def _compute_divided_differences(xvals, fvals, N=None, full=True, forward=True):\n    if full:\n        if forward:\n            xvals = np.asarray(xvals)\n        else:\n            xvals = np.array(xvals)[::-1]\n        M = len(xvals)\n        N = M if N is None else min(N, M)\n        DD = np.zeros([M, N])\n        DD[:, 0] = fvals[:]\n        for i in range(1, N):\n            DD[i:, i] = np.diff(DD[i - 1:, i - 1]) / (xvals[i:] - xvals[:M - i])\n        return DD\n    xvals = np.asarray(xvals)\n    dd = np.array(fvals)\n    row = np.array(fvals)\n    idx2Use = 0 if forward else -1\n    dd[0] = fvals[idx2Use]\n    for i in range(1, len(xvals)):\n        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]\n        row = np.diff(row)[:] / denom\n        dd[i] = row[idx2Use]\n    return dd",
    "docstring": "Return a matrix of divided differences for the xvals, fvals pairs DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i If full is False, just return the main diagonal(or last row): f[a], f[a, b] and f[a, b, c]. If forward is False, return f[c], f[b, c], f[a, b, c].",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_zeros_py.py",
    "ast_data": "FunctionDef name:_compute_divided_differences arg:xvals arg:fvals arg:N arg:full arg:forward arguments arg arg arg arg arg If If Assign Call Assign Call Assign Call Assign Compare Call Assign Call Assign For Call Assign Call Return return:yes Assign Call Assign Call Assign Call Assign Assign For Call Call Assign Call Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "AbstractEngine",
    "source_code": "class AbstractEngine(metaclass=abc.ABCMeta):\n    has_neg_frac = False\n\n    def __init__(self, expr) -> None:\n        self.expr = expr\n        self.aligned_axes = None\n        self.result_type = None\n        self.result_name = None\n\n    def convert(self) -> str:\n        return printing.pprint_thing(self.expr)\n\n    def evaluate(self) -> object:\n        if not self._is_aligned:\n            self.result_type, self.aligned_axes, self.result_name = align_terms(self.expr.terms)\n        res = self._evaluate()\n        return reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type, self.result_name)\n\n    @property\n    def _is_aligned(self) -> bool:\n        return self.aligned_axes is not None and self.result_type is not None\n\n    @abc.abstractmethod\n    def _evaluate(self):\n        pass",
    "docstring": "Object serving as a base class for all engines.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\computation\\engines.py",
    "ast_data": "ClassDef name:AbstractEngine Assign FunctionDef name:__init__ arg:self arg:expr arguments arg arg Assign Assign Assign Assign FunctionDef name:convert arg:self arguments arg Return return:yes Call FunctionDef name:evaluate arg:self arguments arg If Assign Call Assign Call Return return:yes Call FunctionDef name:_is_aligned arg:self arguments arg Return return:yes BoolOp Compare Compare FunctionDef name:_evaluate arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "inherit_names",
    "source_code": "def inherit_names(names: list[str], delegate: type, cache: bool=False, wrap: bool=False) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]:\n\n    def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]:\n        for name in names:\n            meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap)\n            setattr(cls, name, meth)\n        return cls\n    return wrapper",
    "docstring": "Class decorator to pin attributes from an ExtensionArray to a Index subclass. Parameters ---------- names : List[str] delegate : class cache : bool, default False wrap : bool, default False Whether to wrap the inherited result in an Index.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\extension.py",
    "ast_data": "FunctionDef name:inherit_names arg:names arg:delegate arg:cache arg:wrap arguments arg arg arg arg FunctionDef name:wrapper arg:cls arguments arg For Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "virtualenv",
    "name": "AppDataDisabled",
    "source_code": "class AppDataDisabled(AppData):\n    transient = True\n    can_update = False\n\n    def __init__(self) -> None:\n        pass\n    error = RuntimeError('no app data folder available, probably no write access to the folder')\n\n    def close(self):\n        pass\n\n    def reset(self):\n        pass\n\n    def py_info(self, path):\n        return ContentStoreNA()\n\n    def embed_update_log(self, distribution, for_py_version):\n        return ContentStoreNA()\n\n    def extract(self, path, to_folder):\n        raise self.error\n\n    @contextmanager\n    def locked(self, path):\n        yield\n\n    @property\n    def house(self):\n        raise self.error\n\n    def wheel_image(self, for_py_version, name):\n        raise self.error\n\n    def py_info_clear(self):\n        pass",
    "docstring": "No application cache available (most likely as we don't have write permissions).",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py",
    "ast_data": "ClassDef name:AppDataDisabled Assign Assign FunctionDef name:__init__ arg:self arguments arg Assign Call FunctionDef name:close arg:self arguments arg FunctionDef name:reset arg:self arguments arg FunctionDef name:py_info arg:self arg:path arguments arg arg Return return:yes Call FunctionDef name:embed_update_log arg:self arg:distribution arg:for_py_version arguments arg arg arg Return return:yes Call FunctionDef name:extract arg:self arg:path arg:to_folder arguments arg arg arg Raise FunctionDef name:locked arg:self arg:path arguments arg arg FunctionDef name:house arg:self arguments arg Raise FunctionDef name:wheel_image arg:self arg:for_py_version arg:name arguments arg arg arg Raise FunctionDef name:py_info_clear arg:self arguments arg"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n\n    def current_ids():\n        name, group = (None, None)\n        if pwd:\n            name = pwd.getpwuid(os.getuid())[0]\n        if grp:\n            group = grp.getgrgid(os.getgid())[0]\n        return (name, group)\n    if self.finalized:\n        if not (self.uid is None and self.gid is None):\n            self.bus.log('Already running as uid: %r gid: %r' % current_ids())\n    elif self.uid is None and self.gid is None:\n        if pwd or grp:\n            self.bus.log('uid/gid not set', level=30)\n    else:\n        self.bus.log('Started as uid: %r gid: %r' % current_ids())\n        if self.gid is not None:\n            os.setgid(self.gid)\n            os.setgroups([])\n        if self.uid is not None:\n            os.setuid(self.uid)\n        self.bus.log('Running as uid: %r gid: %r' % current_ids())\n    if self.finalized:\n        if self.umask is not None:\n            self.bus.log('umask already set to: %03o' % self.umask)\n    elif self.umask is None:\n        self.bus.log('umask not set', level=30)\n    else:\n        old_umask = os.umask(self.umask)\n        self.bus.log('umask old: %03o, new: %03o' % (old_umask, self.umask))\n    self.finalized = True",
    "docstring": "Drop the process privileges.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg FunctionDef name:current_ids arguments Assign If Assign Call Call If Assign Call Call Return return:yes If If BoolOp Compare Compare Call Call If BoolOp Compare Compare If BoolOp Call Call Call If Compare Call Call If Compare Call Call Call If If Compare Call If Compare Call Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "halide_buffer_numel",
    "source_code": "def halide_buffer_numel(self, name: str):\n    return V.graph.get_buffer(name).get_layout().storage_size()",
    "docstring": "We map all tensors to 1D buffers in Halide since Halide has trouble representing some strides that PyTorch supports. If there are gaps in the underlying layout the numel we pass to Halide includes the gaps while PyTorch's numel excludes them.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\halide.py",
    "ast_data": "FunctionDef name:halide_buffer_numel arg:self arg:name arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "setZ",
    "source_code": "def setZ(self, index, value):\n    self.setOrdinate(2, index, value)",
    "docstring": "Set Z with the value at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:setZ arg:self arg:index arg:value arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "anglit_gen",
    "source_code": "class anglit_gen(rv_continuous):\n\n    def _shape_info(self):\n        return []\n\n    def _pdf(self, x):\n        return np.cos(2 * x)\n\n    def _cdf(self, x):\n        return np.sin(x + np.pi / 4) ** 2.0\n\n    def _sf(self, x):\n        return np.cos(x + np.pi / 4) ** 2.0\n\n    def _ppf(self, q):\n        return np.arcsin(np.sqrt(q)) - np.pi / 4\n\n    def _stats(self):\n        return (0.0, np.pi * np.pi / 16 - 0.5, 0.0, -2 * (np.pi ** 4 - 96) / (np.pi * np.pi - 8) ** 2)\n\n    def _entropy(self):\n        return 1 - np.log(2)",
    "docstring": "An anglit continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x) = \\sin(2x + \\pi/2) = \\cos(2x) for :math:. %(after_notes)s %(example)s",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "ClassDef name:anglit_gen FunctionDef name:_shape_info arg:self arguments arg Return return:no FunctionDef name:_pdf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_cdf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_sf arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_ppf arg:self arg:q arguments arg arg Return return:yes Call Call FunctionDef name:_stats arg:self arguments arg Return return:yes FunctionDef name:_entropy arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "broadcast",
    "source_code": "def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str=''):\n    group_name = _resolve_group_name(group, tag)\n    tensor = torch.ops._c10d_functional.broadcast(self, src, group_name)\n    return _maybe_wrap_tensor(tensor)",
    "docstring": "Broadcasts the tensor to all processes in the given process group. Args: src (int): Source rank group (ProcessGroup or List[int]): The process group to work on. tag (str, optional): A unique identifier for the collective. Default: empty string",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:broadcast arg:self arg:src arg:group arg:tag arguments arg arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_tensor_list",
    "source_code": "def from_tensor_list(element_spec, tensor_list):\n    return _from_tensor_list_helper(lambda spec, value: spec._from_tensor_list(value), element_spec, tensor_list)",
    "docstring": "Returns an element constructed from the given spec and tensor list. Args: element_spec: A nested structure of objects representing to element type specification. tensor_list: A list of tensors to use for constructing the value. Returns: An element constructed from the given spec and tensor list. Raises: ValueError: If the number of tensors needed to construct an element for the given spec does not match the given number of tensors or the given spec is not compatible with the tensor list.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py",
    "ast_data": "FunctionDef name:from_tensor_list arg:element_spec arg:tensor_list arguments arg arg Return return:yes Call arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "adaptive_max_pool2d_with_indices",
    "source_code": "def adaptive_max_pool2d_with_indices(input: Tensor, output_size: BroadcastingList2[int], return_indices: bool=False) -> tuple[Tensor, Tensor]:\n    if has_torch_function_unary(input):\n        return handle_torch_function(adaptive_max_pool2d_with_indices, (input,), input, output_size, return_indices=return_indices)\n    output_size = _list_with_default(output_size, input.size())\n    return torch._C._nn.adaptive_max_pool2d(input, output_size)",
    "docstring": "adaptive_max_pool2d(input, output_size, return_indices=False) Applies a 2D adaptive max pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer or double-integer tuple) return_indices: whether to return pooling indices. Default: ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:adaptive_max_pool2d_with_indices arg:input arg:output_size arg:return_indices arguments arg arg arg If Call Return return:yes Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "Arg",
    "source_code": "class Arg(PatternExpr):\n\n    def _match(self, node: NodeOrConstant, ctx: MatchContext) -> MatchResult:\n        return Match(ctx, self, args=[node])",
    "docstring": "Capture an arg which will become an input to the handler. Args are passed in depth first order.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:Arg FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent):\n    if self._different_canvas(mouseevent) or not self.axes.contains(mouseevent)[0]:\n        return (False, {})\n    trans = self.get_transform().inverted()\n    x, y = trans.transform([mouseevent.x, mouseevent.y])\n    xmin, xmax, ymin, ymax = self.get_extent()\n    inside = x is not None and (x - xmin) * (x - xmax) <= 0 and (y is not None) and ((y - ymin) * (y - ymax) <= 0)\n    return (inside, {})",
    "docstring": "Test whether the mouse event occurred within the image.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arguments arg arg If BoolOp Call Call Return return:yes Assign Call Call Assign Call Assign Call Assign BoolOp Compare Compare Compare Compare Return return:yes"
  },
  {
    "library": "scipy",
    "name": "EchoBackend",
    "source_code": "class EchoBackend:\n    __ua_domain__ = 'numpy.scipy.fft'\n\n    @staticmethod\n    def __ua_function__(method, args, kwargs):\n        print(method, args, kwargs, sep='\\n')",
    "docstring": "Backend that just prints the __ua_function__ arguments",
    "type": "class",
    "file_path": "scipy\\scipy\\fft\\_debug_backends.py",
    "ast_data": "ClassDef name:EchoBackend Assign FunctionDef name:__ua_function__ arg:method arg:args arg:kwargs arguments arg arg arg Call"
  },
  {
    "library": "matplotlib",
    "name": "text",
    "source_code": "def text(self, x, y, z, s, zdir=None, *, axlim_clip=False, **kwargs):\n    text = super().text(x, y, s, **kwargs)\n    art3d.text_2d_to_3d(text, z, zdir, axlim_clip)\n    return text",
    "docstring": "Add the text *s* to the 3D Axes at location *x*, *y*, *z* in data coordinates. Parameters ---------- x, y, z : float The position to place the text. s : str The text. zdir : {'x', 'y', 'z', 3-tuple}, optional The direction to be used as the z-direction. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide text that is outside the axes view limits. .. versionadded:: 3.10 **kwargs Other arguments are forwarded to . Returns ------- The created instance.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:text arg:self arg:x arg:y arg:z arg:s arg:zdir arguments arg arg arg arg arg arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "embedding_tables",
    "source_code": "@property\ndef embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:\n    if self._using_tpu:\n        if save_context.in_save_context():\n            return {table: self._variables[table.name]['parameters'].variables[0] for table in self._table_config}\n        raise RuntimeError('Unable to retrieve embedding tables when using a TPU strategy. If you need access, save your model, create this object under a CPU strategy and restore.')\n    self._maybe_build(None)\n    return {table: self._variables[table.name]['parameters'] for table in self._table_config}",
    "docstring": "Returns a dict of embedding tables, keyed by . This property only works when the object is created under a non-TPU strategy. This is intended to be used to for CPU based lookup when creating a serving checkpoint. Returns: A dict of embedding tables, keyed by . Raises: RuntimeError: If object was created under a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:embedding_tables arg:self arguments arg If If Call Return return:yes Raise Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_is_pattern_dtype_config_and_qconfig_supported_by_backend",
    "source_code": "def _is_pattern_dtype_config_and_qconfig_supported_by_backend(pattern: Optional[Pattern], matched_node_pattern: Optional[list[Node]], qconfig: QConfigAny, backend_config: BackendConfig) -> bool:\n    if backend_config is None or pattern is None:\n        return True\n    assert matched_node_pattern is not None and len(matched_node_pattern) >= 1\n    pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config)\n    dtype_configs: list[DTypeConfig] = pattern_to_dtype_configs.get(pattern, [])\n    pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)\n    root_node_getter = pattern_to_root_node_getter.get(pattern, _default_root_node_getter)\n    root_node = root_node_getter(matched_node_pattern)\n    input_node = root_node\n    output_node = matched_node_pattern[0]\n    for dtype_config in dtype_configs:\n        supported = True\n        for arg in list(input_node.args) + list(input_node.kwargs.values()):\n            supported = supported and _is_input_arg_dtype_supported_by_backend(arg, input_node, qconfig, dtype_config, backend_config)\n        supported = supported and _is_output_dtype_supported_by_backend(output_node, qconfig, dtype_config)\n        if supported:\n            return True\n    return False",
    "docstring": "Check if the dtype configuration of a pattern is supported by the backend or not, and whether the qconfig satisfies constraints specified in the corresponding dtype config.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\prepare.py",
    "ast_data": "FunctionDef name:_is_pattern_dtype_config_and_qconfig_supported_by_backend arg:pattern arg:matched_node_pattern arg:qconfig arg:backend_config arguments arg arg arg arg If BoolOp Compare Compare Return return:yes BoolOp Compare Compare Call Assign Call Call Assign Call Assign Call Assign Call Assign Assign For Assign For Call Call Call Assign BoolOp Call Assign BoolOp Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "increment",
    "source_code": "@staticmethod\ndef increment(event_name: str, log_level: CompileEventLogLevel, key: str, value: int):\n    chromium_log = get_chromium_event_logger()\n    if log_level == CompileEventLogLevel.CHROMIUM or log_level == CompileEventLogLevel.PT2_COMPILE:\n        chromium_log.increment(event_name, key, value)\n    else:\n        assert log_level == CompileEventLogLevel.COMPILATION_METRIC\n        top_event = chromium_log.get_outermost_event()\n        if event_name != top_event:\n            raise RuntimeError(\"Log level is COMPILATION_METRIC, but event_name isn't the toplevel event. CompilationMetrics must be logged to the toplevel event. Consider using `increment_toplevel` directly.\")\n        metrics_context = get_metrics_context()\n        if not metrics_context.in_progress():\n            raise RuntimeError('No metrics context is in progress. Please only call this function within a metrics context/dynamo_timed.')\n        metrics_context.increment(key, value)\n        chromium_log.increment(event_name, key, value)",
    "docstring": "Increments an existing field, or adds it",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:increment arg:event_name arg:log_level arg:key arg:value arguments arg arg arg arg Assign Call If BoolOp Compare Compare Call Compare Assign Call If Compare Raise Call Assign Call If Call Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_decision_function",
    "source_code": "def _decision_function(self, X):\n    X = self._validate_for_predict(X)\n    X = self._compute_kernel(X)\n    if self._sparse:\n        dec_func = self._sparse_decision_function(X)\n    else:\n        dec_func = self._dense_decision_function(X)\n    if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:\n        return -dec_func.ravel()\n    return dec_func",
    "docstring": "Evaluates the decision function for the samples in X. Parameters ---------- X : array-like of shape (n_samples, n_features) Returns ------- X : array-like of shape (n_samples, n_class * (n_class-1) / 2) Returns the decision function of the sample for each class in the model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:_decision_function arg:self arg:X arguments arg arg Assign Call Assign Call If Assign Call Assign Call If BoolOp Compare Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_name_scope_wrapper",
    "source_code": "def _add_name_scope_wrapper(func, api_signature):\n    if 'name' not in api_signature.parameters:\n        return func\n    func_signature = tf_inspect.signature(func)\n    func_argspec = tf_inspect.getargspec(func)\n    if 'name' in func_signature.parameters or func_argspec.keywords is not None:\n        return func\n    name_index = list(api_signature.parameters).index('name')\n\n    def wrapped_func(*args, **kwargs):\n        if name_index < len(args):\n            name = args[name_index]\n            args = args[:name_index] + args[name_index + 1:]\n        else:\n            name = kwargs.pop('name', None)\n        if name is None:\n            return func(*args, **kwargs)\n        else:\n            with ops.name_scope(name):\n                return func(*args, **kwargs)\n    wrapped_func = tf_decorator.make_decorator(func, wrapped_func)\n    wrapped_func.__signature__ = func_signature.replace(parameters=list(func_signature.parameters.values()) + [api_signature.parameters['name']])\n    del wrapped_func._tf_decorator\n    return wrapped_func",
    "docstring": "Wraps to expect a \"name\" arg, and use it to call . If already expects a \"name\" arg, or if does not expect a \"name\" arg, then returns as-is. Args: func: The function to wrap. Signature must match (except the \"name\" parameter may be missing. api_signature: The signature of the original API (used to find the index for the \"name\" parameter). Returns: The wrapped function (or the original function if no wrapping is needed).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:_add_name_scope_wrapper arg:func arg:api_signature arguments arg arg If Compare Return return:yes Assign Call Assign Call If BoolOp Compare Compare Return return:yes Assign Call Call FunctionDef name:wrapped_func arguments arg arg If Compare Call Assign Assign Assign Call If Compare Return return:yes Call With Call Return return:yes Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wait_event",
    "source_code": "def wait_event(self, event) -> None:\n    event.wait(self)",
    "docstring": "Make all future work submitted to the stream wait for an event. Args: event (torch.cuda.Event): an event to wait for. .. note:: This is a wrapper around `CUDA Stream documentationevent`: only future operations are affected. .. _CUDA Stream documentation:",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:wait_event arg:self arg:event arguments arg arg Call"
  },
  {
    "library": "django",
    "name": "file_hash",
    "source_code": "def file_hash(self, name, content=None):\n    if content is None:\n        return None\n    hasher = md5(usedforsecurity=False)\n    for chunk in content.chunks():\n        hasher.update(chunk)\n    return hasher.hexdigest()[:12]",
    "docstring": "Return a hash of the file with the given name and optional content.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\storage.py",
    "ast_data": "FunctionDef name:file_hash arg:self arg:name arg:content arguments arg arg arg If Compare Return return:no Assign Call For Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_window_indexer",
    "source_code": "def _get_window_indexer(self) -> BaseIndexer:\n    return ExponentialMovingWindowIndexer()",
    "docstring": "Return an indexer class that will compute the window start and end bounds",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\ewm.py",
    "ast_data": "FunctionDef name:_get_window_indexer arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "remove_kernel_local_buffers",
    "source_code": "def remove_kernel_local_buffers(self) -> None:\n    scheduler = V.graph.scheduler\n    if not scheduler:\n        return\n    fused_node_names = OrderedSet((scheduler.name_to_buf[buf].defining_op_name() for buf in self.store_buffer_names if buf in scheduler.name_to_buf))\n    names_to_remove = OrderedSet[str]()\n    for name in self.store_buffer_names:\n        if name not in self.must_keep_buffers and name not in self.args.input_buffers and scheduler.can_buffer_be_removed_through_fusion(name, fused_node_names):\n            names_to_remove.add(name)\n    for name in names_to_remove:\n        if name in self.args.inplace_buffers:\n            buf = self.args.inplace_buffers[name]\n            if isinstance(buf, RemovedArg):\n                continue\n            remove = all((n in names_to_remove for n in buf.other_names))\n            if remove:\n                self.remove_inplace_buffer(name)\n            self.inplaced_to_remove.add(name)\n        else:\n            self.remove_buffer(name)",
    "docstring": "Any buffers that are both created and have a last use in the same kernel can be removed. Note that V.graph.scheduler can be None when codegening triton template kernels.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "FunctionDef name:remove_kernel_local_buffers arg:self arguments arg Assign If Return return:no Assign Call Call Compare Assign Call For If BoolOp Compare Compare Call Call For If Compare Assign If Call Assign Call Compare If Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_is_comparable_dtype",
    "source_code": "def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n    if self.tz is not None:\n        return isinstance(dtype, DatetimeTZDtype)\n    return lib.is_np_dtype(dtype, 'M')",
    "docstring": "Can we compare values of the given dtype to our own?",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\datetimes.py",
    "ast_data": "FunctionDef name:_is_comparable_dtype arg:self arg:dtype arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_unflatten_state_dict",
    "source_code": "def _unflatten_state_dict(state_dict: STATE_DICT_TYPE, mapping: FLATTEN_MAPPING) -> STATE_DICT_TYPE:\n    nested: STATE_DICT_TYPE = {}\n    for key, value in state_dict.items():\n        _set_element(nested, mapping[key], value)\n    return nested",
    "docstring": "Restore the original nested state_dict according to ``.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_unflatten_state_dict arg:state_dict arg:mapping arguments arg arg For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "plan",
    "source_code": "def plan(self, state: MemoryPlanningState) -> MemoryPlanningLine:\n    return self",
    "docstring": "First pass to find reuse",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py",
    "ast_data": "FunctionDef name:plan arg:self arg:state arguments arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "doc2path",
    "source_code": "def doc2path(self, docname: str, base: bool=True) -> _StrPath:\n    return self.project.doc2path(docname, absolute=base)",
    "docstring": "Return the filename for the document name. If *base* is True, return absolute path under self.srcdir. If *base* is False, return relative path to self.srcdir.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\__init__.py",
    "ast_data": "FunctionDef name:doc2path arg:self arg:docname arg:base arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_sub_value",
    "source_code": "@staticmethod\ndef get_sub_value(user):\n    return user.get_user_id()",
    "docstring": "Return user's ID as `` value in token payload. For instance:: @staticmethod def get_sub_value(user): return str(user.id)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7523\\token.py",
    "ast_data": "FunctionDef name:get_sub_value arg:user arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "NonlinearConstraint",
    "source_code": "class NonlinearConstraint:\n\n    def __init__(self, fun, lb, ub, jac='2-point', hess=None, keep_feasible=False, finite_diff_rel_step=None, finite_diff_jac_sparsity=None):\n        if hess is None:\n            hess = BFGS()\n        self.fun = fun\n        self.lb = lb\n        self.ub = ub\n        self.finite_diff_rel_step = finite_diff_rel_step\n        self.finite_diff_jac_sparsity = finite_diff_jac_sparsity\n        self.jac = jac\n        self.hess = hess\n        self.keep_feasible = keep_feasible",
    "docstring": "Nonlinear constraint on the variables. The constraint has the general inequality form:: lb array_like, shape (m,)`lbublbubHessianUpdateStrategyBFGSSR1`x[0] >> from scipy.optimize import NonlinearConstraint >>> import numpy as np >>> con = lambda x: x[0] - np.sin(x[1]) >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_constraints.py",
    "ast_data": "ClassDef name:NonlinearConstraint FunctionDef name:__init__ arg:self arg:fun arg:lb arg:ub arg:jac arg:hess arg:keep_feasible arg:finite_diff_rel_step arg:finite_diff_jac_sparsity arguments arg arg arg arg arg arg arg arg arg If Compare Assign Call Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "ShardedOptimStateDictConfig",
    "source_code": "@dataclass\nclass ShardedOptimStateDictConfig(OptimStateDictConfig):\n    _use_dtensor: bool = False",
    "docstring": "`ShardedOptimStateDictConfig`.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\api.py",
    "ast_data": "ClassDef name:ShardedOptimStateDictConfig"
  },
  {
    "library": "matplotlib",
    "name": "get_parse_math",
    "source_code": "def get_parse_math(self):\n    return self._parse_math",
    "docstring": "Return whether mathtext parsing is considered for this .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_parse_math arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__call__",
    "source_code": "def __call__(self, x):\n    with np.errstate(invalid='ignore'):\n        return umath.less_equal(x, self.critical_value)",
    "docstring": "Executes the call behavior.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg With Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "to_proto",
    "source_code": "def to_proto(self, export_scope=None):\n    if export_scope is None:\n        return self.saver_def\n    if not (self.saver_def.filename_tensor_name.startswith(export_scope) and self.saver_def.save_tensor_name.startswith(export_scope) and self.saver_def.restore_op_name.startswith(export_scope)):\n        return None\n    saver_def = saver_pb2.SaverDef()\n    saver_def.CopyFrom(self.saver_def)\n    saver_def.filename_tensor_name = ops.strip_name_scope(saver_def.filename_tensor_name, export_scope)\n    saver_def.save_tensor_name = ops.strip_name_scope(saver_def.save_tensor_name, export_scope)\n    saver_def.restore_op_name = ops.strip_name_scope(saver_def.restore_op_name, export_scope)\n    return saver_def",
    "docstring": "Converts this to a protocol buffer. Args: export_scope: Optional . Name scope to remove. Returns: A protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:to_proto arg:self arg:export_scope arguments arg arg If Compare Return return:yes If BoolOp Call Call Call Return return:no Assign Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "num_encoded_dims",
    "source_code": "@property\ndef num_encoded_dims(self) -> int:\n    return self._num_encoded_dims",
    "docstring": "Number of encoded dimensions.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\positional_encoder.py",
    "ast_data": "FunctionDef name:num_encoded_dims arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, callback, failsafe=None, priority=None, **kwargs):\n    self.callback = callback\n    if failsafe is None:\n        failsafe = getattr(callback, 'failsafe', False)\n    self.failsafe = failsafe\n    if priority is None:\n        priority = getattr(callback, 'priority', 50)\n    self.priority = priority\n    self.kwargs = kwargs",
    "docstring": "Initialize the hook instance.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cprequest.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:callback arg:failsafe arg:priority arguments arg arg arg arg arg Assign If Compare Assign Call Assign If Compare Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "AddValue",
    "source_code": "def AddValue(self, val: core_types.Tensor) -> core_types.Tensor:\n    if not self._outer_context:\n        return val\n    if val.name in self._values:\n        result = self._external_values.get(val.name)\n        return val if result is None else result\n    result = val\n    self._values.add(val.name)\n    if self._outer_context:\n        result = self._outer_context.AddValue(val)\n        self._values.add(result.name)\n    self._external_values[val.name] = result\n    return result",
    "docstring": "Add to the current context and its outer context recursively.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py",
    "ast_data": "FunctionDef name:AddValue arg:self arg:val arguments arg arg If Return return:yes If Compare Assign Call Return return:yes Compare Assign Call If Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_create_join_index",
    "source_code": "@final\ndef _create_join_index(self, index: Index, other_index: Index, indexer: npt.NDArray[np.intp] | None, how: JoinHow='left') -> Index:\n    if self.how in (how, 'outer') and (not isinstance(other_index, MultiIndex)):\n        mask = indexer == -1\n        if np.any(mask):\n            fill_value = na_value_for_dtype(index.dtype, compat=False)\n            index = index.append(Index([fill_value]))\n    if indexer is None:\n        return index.copy()\n    return index.take(indexer)",
    "docstring": "Create a join index by rearranging one index to match another Parameters ---------- index : Index index being rearranged other_index : Index used to supply values not found in index indexer : np.ndarray[np.intp] or None how to rearrange index how : str Replacement is only necessary if indexer based on other_index. Returns ------- Index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:_create_join_index arg:self arg:index arg:other_index arg:indexer arg:how arguments arg arg arg arg arg If BoolOp Compare Call Assign Compare If Call Assign Call Assign Call Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_get_empty_dtype",
    "source_code": "def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:\n    if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):\n        empty_dtype = join_units[0].block.dtype\n        return empty_dtype\n    has_none_blocks = any((unit.block.dtype.kind == 'V' for unit in join_units))\n    dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]\n    dtype = find_common_type(dtypes)\n    if has_none_blocks:\n        dtype = ensure_dtype_can_hold_na(dtype)\n    return dtype",
    "docstring": "Return dtype and N/A values to use when concatenating specified units. Returned N/A value may be None which means there was no casting involved. Returns ------- dtype",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\internals\\concat.py",
    "ast_data": "FunctionDef name:_get_empty_dtype arg:join_units arguments arg If Call Assign Return return:yes Assign Call Compare Assign Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_HistogramMseMaxFrequency",
    "source_code": "@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY)\nclass _HistogramMseMaxFrequency(_HistogramCalibrationAlgorithmBase):\n\n    def get_min_max_value(self) -> tuple[float, float]:\n        freq_max_idx = np.argmax(self._hist_freq)\n        return self._get_min_max_value_by_expanding_range(freq_max_idx)",
    "docstring": "HistogramMseMaxFrequency for calculating min and max values of calibration result.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_HistogramMseMaxFrequency FunctionDef name:get_min_max_value arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "InputReplicationMode",
    "source_code": "@tf_export('distribute.InputReplicationMode')\nclass InputReplicationMode(enum.Enum):\n    PER_WORKER = 'PER_WORKER'\n    PER_REPLICA = 'PER_REPLICA'",
    "docstring": "Replication mode for input function. * : The input function will be called on each worker independently, creating as many input pipelines as number of workers. Replicas will dequeue from the local Dataset on their worker. doesn't manage any state sharing between such separate input pipelines. * : The input function will be called on each replica separately. doesn't manage any state sharing between such separate input pipelines.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:InputReplicationMode Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "elu",
    "source_code": "@dispatch.add_dispatch_support\ndef elu(x, alpha=1.0):\n    return backend.elu(x, alpha)",
    "docstring": "Exponential Linear Unit. The exponential linear unit (ELU) with is: if and if alphaxx > 0alpha * (exp(x) - 1)x < 0`. Reference: [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) (Clevert et al, 2016)](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:elu arg:x arg:alpha arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_create_op_prefix",
    "source_code": "def _create_op_prefix(name: str) -> str:\n    camel_case = ''.join([p.title() for p in name.split('_')])\n    return (camel_case + 'Backward').replace('ForwardBackward', 'Backward')",
    "docstring": "Takes a native function name converts to an op prefix name. Note that the \"name\" parameter must be the native function name without the optional variant suffix, so \"add\" instead of \"add.out\". OP names correspond to classes, hence the change to title case. Example:: >>> _create_op_prefix(\"add\") 'AddBackward'",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\load_derivatives.py",
    "ast_data": "FunctionDef name:_create_op_prefix arg:name arguments arg Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SimpleCSEHandler",
    "source_code": "class SimpleCSEHandler(WrapperHandler):\n\n    def __init__(self, inner: Any):\n        super().__init__(inner)\n        self.cse_cache: dict[str, Union[Any, tuple[Any, ...]]] = {}\n        self.mock = MockHandler()\n\n    def indirect_indexing(self, *args, **kwargs) -> sympy.Expr:\n        return super().indirect_indexing(*args, **kwargs)\n\n    def store(self, *args, **kwargs) -> None:\n        raise NotImplementedError('store not implemented')\n\n    def store_reduction(self, *args, **kwargs) -> None:\n        raise NotImplementedError('store not implemented')\n\n    def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:\n        key = getattr(self.mock, name)(*args, **kwargs)\n        val = self.cse_cache.get(key)\n        if val is not None:\n            return val\n        val = getattr(self._inner, name)(*args, **kwargs)\n        self.cse_cache[key] = val\n        return val",
    "docstring": "Wraps the underlying handler with a CSE pass NOTE: Compared to codegen level CSE this is simplified as it doesn't support stores which require load cache invalidation.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ops_handler.py",
    "ast_data": "ClassDef name:SimpleCSEHandler FunctionDef name:__init__ arg:self arg:inner arguments arg arg Call Call Assign Call FunctionDef name:indirect_indexing arg:self arguments arg arg arg Return return:yes Call Call FunctionDef name:store arg:self arguments arg arg arg Raise Call FunctionDef name:store_reduction arg:self arguments arg arg arg Raise Call FunctionDef name:_default arg:self arg:name arg:args arg:kwargs arguments arg arg arg arg Assign Call Call Assign Call If Compare Return return:yes Assign Call Call Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_bcast_lwls",
    "source_code": "@staticmethod\ndef _bcast_lwls(linewidths, dashes):\n    if mpl.rcParams['_internal.classic_mode']:\n        return (linewidths, dashes)\n    if len(dashes) != len(linewidths):\n        l_dashes = len(dashes)\n        l_lw = len(linewidths)\n        gcd = math.gcd(l_dashes, l_lw)\n        dashes = list(dashes) * (l_lw // gcd)\n        linewidths = list(linewidths) * (l_dashes // gcd)\n    dashes = [mlines._scale_dashes(o, d, lw) for (o, d), lw in zip(dashes, linewidths)]\n    return (linewidths, dashes)",
    "docstring": "Internal helper function to broadcast + scale ls/lw In the collection drawing code, the linewidth and linestyle are cycled through as circular buffers (via ``). Thus, if we are going to scale the dash pattern at set time (not draw time) we need to do the broadcasting now and expand both lists to be the same length. Parameters ---------- linewidths : list line widths of collection dashes : list dash specification (offset, (dash pattern tuple)) Returns ------- linewidths, dashes : list Will be the same length, dashes are scaled by paired linewidth",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:_bcast_lwls arg:linewidths arg:dashes arguments arg arg If Return return:yes If Compare Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes"
  },
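The gcd trick in `_bcast_lwls` expands both circular buffers to the least common multiple of their lengths. A standalone sketch of just that broadcasting step, without the dash scaling:

```python
import math

def broadcast_cycles(a, b):
    # Repeat each list so both reach length lcm(len(a), len(b)).
    gcd = math.gcd(len(a), len(b))
    return list(a) * (len(b) // gcd), list(b) * (len(a) // gcd)

lw, dashes = broadcast_cycles([1, 2], ["solid", "dashed", "dotted"])
print(lw)      # [1, 2, 1, 2, 1, 2]
print(dashes)  # ['solid', 'dashed', 'dotted', 'solid', 'dashed', 'dotted']
```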
  {
    "library": "pandas",
    "name": "check_value_size",
    "source_code": "def check_value_size(value, mask: npt.NDArray[np.bool_], length: int):\n    if is_array_like(value):\n        if len(value) != length:\n            raise ValueError(f\"Length of 'value' does not match. Got ({len(value)})  expected {length}\")\n        value = value[mask]\n    return value",
    "docstring": "Validate the size of the values passed to ExtensionArray.fillna.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\missing.py",
    "ast_data": "FunctionDef name:check_value_size arg:value arg:mask arg:length arguments arg arg arg If Call If Compare Call Raise Call Call Assign Return return:yes"
  },
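A quick illustration of the contract `check_value_size` enforces: an array-like `value` must match the full length, after which only the masked positions are kept:

```python
import numpy as np

value = np.array([10, 20, 30, 40])
mask = np.array([True, False, True, False])

assert len(value) == len(mask)  # otherwise the function raises ValueError
print(value[mask])              # [10 30] -- only the positions being filled
```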
  {
    "library": "cherrypy",
    "name": "proxy",
    "source_code": "def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For', scheme='X-Forwarded-Proto', debug=False):\n    request = cherrypy.serving.request\n    if scheme:\n        s = request.headers.get(scheme, None)\n        if debug:\n            cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY')\n        if s == 'on' and 'ssl' in scheme.lower():\n            scheme = 'https'\n        else:\n            scheme = s\n    if not scheme:\n        scheme = request.base[:request.base.find('://')]\n    if local:\n        lbase = request.headers.get(local, None)\n        if debug:\n            cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY')\n        if lbase is not None:\n            base = lbase.split(',')[0]\n    if not base:\n        default = urllib.parse.urlparse(request.base).netloc\n        base = request.headers.get('Host', default)\n    if base.find('://') == -1:\n        base = scheme + '://' + base\n    request.base = base\n    if remote:\n        xff = request.headers.get(remote)\n        if debug:\n            cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')\n        if xff:\n            if remote == 'X-Forwarded-For':\n                xff = next((ip.strip() for ip in xff.split(',')))\n            request.remote.ip = xff",
    "docstring": "Change the base URL (scheme://host[:port][/path]). For running a CP server behind Apache, lighttpd, or other HTTP server. For Apache and lighttpd, you should leave the 'local' argument at the default value of 'X-Forwarded-Host'. For Squid, you probably want to set tools.proxy.local = 'Origin'. If you want the new request.base to include path info (not just the host), you must explicitly set base to the full base path, and ALSO set 'local' to '', so that the X-Forwarded-Host request header (which never includes path info) does not override it. Regardless, the value for 'base' MUST NOT end in a slash. cherrypy.request.remote.ip (the IP address of the client) will be rewritten if the header specified by the 'remote' arg is valid. By default, 'remote' is set to 'X-Forwarded-For'. If you do not want to rewrite remote.ip, set the 'remote' arg to an empty string.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:proxy arg:base arg:local arg:remote arg:scheme arg:debug arguments arg arg arg arg arg Assign If Assign Call If Call If BoolOp Compare Compare Call Assign Assign If Assign Call If Assign Call If Call If Compare Assign Call If Assign Call Assign Call If Compare Call Assign Assign If Assign Call If Call If If Compare Assign Call Call Call Assign"
  },
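The tool above is normally switched on through CherryPy config rather than called directly. A minimal sketch, assuming a trivial `Root` app (the handler class is illustrative only):

```python
import cherrypy

class Root:
    @cherrypy.expose
    def index(self):
        # request.base reflects the rewritten scheme://host once the tool runs
        return cherrypy.request.base

config = {
    '/': {
        'tools.proxy.on': True,
        # Per the docstring, Squid users would also set:
        # 'tools.proxy.local': 'Origin',
    }
}

if __name__ == '__main__':
    cherrypy.quickstart(Root(), '/', config)
```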
  {
    "library": "tensorflow",
    "name": "readline",
    "source_code": "def readline(self):\n    self._preread_check()\n    return self._prepare_value(self._read_buf.readline())",
    "docstring": "Reads the next line, keeping \\n. At EOF, returns ''.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:readline arg:self arguments arg Call Return return:yes Call Call"
  },
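This `readline` is reachable through the public `tf.io.gfile.GFile` wrapper; a small sketch of the documented EOF behavior (the file path is illustrative):

```python
import tensorflow as tf

with tf.io.gfile.GFile("/tmp/example.txt", "w") as f:
    f.write("first\nsecond\n")

with tf.io.gfile.GFile("/tmp/example.txt", "r") as f:
    print(repr(f.readline()))  # 'first\n'  -- keeps the trailing newline
    print(repr(f.readline()))  # 'second\n'
    print(repr(f.readline()))  # ''         -- empty string at EOF
```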
  {
    "library": "cherrypy",
    "name": "make_app",
    "source_code": "class make_app:\n\n    def __init__(self, nextapp, path=None, aggregate=False):\n        if profile is None or pstats is None:\n            msg = \"Your installation of Python does not have a profile module. If you're on Debian, try `sudo apt-get install python-profiler`. See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.\"\n            warnings.warn(msg)\n        self.nextapp = nextapp\n        self.aggregate = aggregate\n        if aggregate:\n            self.profiler = ProfileAggregator(path)\n        else:\n            self.profiler = Profiler(path)\n\n    def __call__(self, environ, start_response):\n\n        def gather():\n            result = []\n            for line in self.nextapp(environ, start_response):\n                result.append(line)\n            return result\n        return self.profiler.run(gather)",
    "docstring": "Profiling WSGI middleware wrapper.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\profiler.py",
    "ast_data": "ClassDef name:make_app FunctionDef name:__init__ arg:self arg:nextapp arg:path arg:aggregate arguments arg arg arg arg If BoolOp Compare Compare Assign Call Assign Assign If Assign Call Assign Call FunctionDef name:__call__ arg:self arg:environ arg:start_response arguments arg arg arg FunctionDef name:gather arguments Assign For Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "infer_bbox_shape3d",
    "source_code": "def infer_bbox_shape3d(boxes: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n    validate_bbox3d(boxes)\n    left = torch.index_select(boxes, 1, torch.tensor([1, 2, 5, 6], device=boxes.device, dtype=torch.long))[:, :, 0]\n    right = torch.index_select(boxes, 1, torch.tensor([0, 3, 4, 7], device=boxes.device, dtype=torch.long))[:, :, 0]\n    widths = (left - right + 1)[:, 0]\n    bot = torch.index_select(boxes, 1, torch.tensor([2, 3, 6, 7], device=boxes.device, dtype=torch.long))[:, :, 1]\n    upper = torch.index_select(boxes, 1, torch.tensor([0, 1, 4, 5], device=boxes.device, dtype=torch.long))[:, :, 1]\n    heights = (bot - upper + 1)[:, 0]\n    depths = (boxes[:, 4:, 2] - boxes[:, :4, 2] + 1)[:, 0]\n    return (depths, heights, widths)",
    "docstring": "Auto-infer the output sizes for the given 3D bounding boxes. Args: boxes: a tensor containing the coordinates of the bounding boxes to be extracted. The tensor must have the shape of Bx8x3, where each box is defined in the following `(B,)(B,)(B,)`. Example: >>> boxes = torch.tensor([[[ 0, 1, 2], ... [10, 1, 2], ... [10, 21, 2], ... [ 0, 21, 2], ... [ 0, 1, 32], ... [10, 1, 32], ... [10, 21, 32], ... [ 0, 21, 32]], ... [[ 3, 4, 5], ... [43, 4, 5], ... [43, 54, 5], ... [ 3, 54, 5], ... [ 3, 4, 65], ... [43, 4, 65], ... [43, 54, 65], ... [ 3, 54, 65]]]) # 2x8x3 >>> infer_bbox_shape3d(boxes) (tensor([31, 61]), tensor([21, 51]), tensor([11, 41]))",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\bbox.py",
    "ast_data": "FunctionDef name:infer_bbox_shape3d arg:boxes arguments arg Call Assign Call Call Assign Call Call Assign Assign Call Call Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__cmp__",
    "source_code": "def __cmp__(self, other):\n    return builtins.cmp(self.value, other.value)",
    "docstring": "Compare current HTTP header to another by value only.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__cmp__ arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "PyfftwBackend",
    "source_code": "class PyfftwBackend:\n    __ua_domain__ = 'numpy.scipy.fft'\n\n    @staticmethod\n    def __ua_function__(method, args, kwargs):\n        kwargs.pop('overwrite_x', None)\n        fn = getattr(pyfftw_fft, method.__name__, None)\n        return NotImplemented if fn is None else fn(*args, **kwargs)",
    "docstring": "Backend for pyfftw",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\fft_basic.py",
    "ast_data": "ClassDef name:PyfftwBackend Assign FunctionDef name:__ua_function__ arg:method arg:args arg:kwargs arguments arg arg arg Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "django",
    "name": "_check_filter_vertical",
    "source_code": "def _check_filter_vertical(self, obj):\n    if not isinstance(obj.filter_vertical, (list, tuple)):\n        return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')\n    else:\n        return list(chain.from_iterable((self._check_filter_item(obj, field_name, 'filter_vertical[%d]' % index) for index, field_name in enumerate(obj.filter_vertical))))",
    "docstring": "Check that filter_vertical is a sequence of field names.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_filter_vertical arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
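From the user side, the check above fires when `filter_vertical` is not a list or tuple of field names. A minimal admin sketch; the app, model, and field names are hypothetical:

```python
from django.contrib import admin
from myapp.models import Article  # hypothetical model with M2M fields

@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    # Must be a list or tuple of field names; anything else raises admin.E017.
    filter_vertical = ("tags", "contributors")
```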
  {
    "library": "tensorflow",
    "name": "has_partial_batch",
    "source_code": "@abc.abstractmethod\ndef has_partial_batch(self):\n    raise NotImplementedError",
    "docstring": "Whether the dataset has partial batch at the end.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:has_partial_batch arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "master",
    "source_code": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n    if task_type is not None and task_id is not None:\n        master = self.cluster_spec().task_address(task_type, task_id)\n    else:\n        master = self._master\n    return format_master_url(master, rpc_layer=rpc_layer or self._rpc_layer)",
    "docstring": "Returns the master address to use when creating a session. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC used by distributed TensorFlow. Returns: The name or URL of the session master. If a task_type and task_id is given, this will override the string passed into the initialization function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:master arg:self arg:task_type arg:task_id arg:rpc_layer arguments arg arg arg arg If BoolOp Compare Compare Assign Call Call Assign Return return:yes Call BoolOp"
  },
  {
    "library": "scipy",
    "name": "run_dualannealing",
    "source_code": "def run_dualannealing(self):\n    self.function.nfev = 0\n    t0 = time.time()\n    res = dual_annealing(self.fun, self.bounds)\n    t1 = time.time()\n    res.success = self.function.success(res.x)\n    res.nfev = self.function.nfev\n    self.add_result(res, t1 - t0, 'DA')",
    "docstring": "Do an optimization run for dual_annealing",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:run_dualannealing arg:self arguments arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_with_nonzero_rank",
    "source_code": "def _with_nonzero_rank(data):\n    if data.shape.ndims is not None:\n        if data.shape.ndims == 0:\n            return array_ops_stack.stack([data])\n        else:\n            return data\n    else:\n        data_shape = shape(data)\n        data_ndims = rank(data)\n        return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])",
    "docstring": "If is scalar, then add a dimension; otherwise return as-is.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:_with_nonzero_rank arg:data arguments arg If Compare If Compare Return return:yes Call Return return:yes Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "items",
    "source_code": "def items(self) -> Iterable[tuple[str, Any]]:\n    return ((k, self[k]) for k in self._keys)",
    "docstring": "Return an iterable of the ParameterDict key/value pairs.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\container.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg Return return:yes"
  },
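Usage of `ParameterDict.items` through the public `torch.nn` API, iterating keys in insertion order:

```python
import torch
from torch import nn

params = nn.ParameterDict({
    "weight": nn.Parameter(torch.ones(2, 2)),
    "bias": nn.Parameter(torch.zeros(2)),
})
for key, value in params.items():
    print(key, tuple(value.shape))  # weight (2, 2) then bias (2,)
```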
  {
    "library": "pytorch",
    "name": "convert_frame",
    "source_code": "def convert_frame(compiler_fn: CompilerFn, hooks: Hooks) -> ConvertFrame:\n    return ConvertFrame(compiler_fn, hooks)",
    "docstring": "Try to convert a frame into an FX graph, if error leave frame unmodified",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\convert_frame.py",
    "ast_data": "FunctionDef name:convert_frame arg:compiler_fn arg:hooks arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "getsourcefile",
    "source_code": "def getsourcefile(object):\n    return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.getsourcefile.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:getsourcefile arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_unstack_extension_series",
    "source_code": "def _unstack_extension_series(series: Series, level, fill_value, sort: bool) -> DataFrame:\n    df = series.to_frame()\n    result = df.unstack(level=level, fill_value=fill_value, sort=sort)\n    result.columns = result.columns._drop_level_numbers([0])\n    return result",
    "docstring": "Unstack an ExtensionArray-backed Series. The ExtensionDtype is preserved. Parameters ---------- series : Series A Series with an ExtensionArray for values level : Any The level name or number. fill_value : Any The user-level (not physical storage) fill value to use for missing values introduced by the reshape. Passed to ``. sort : bool Whether to sort the resulting MuliIndex levels Returns ------- DataFrame Each column of the DataFrame will have the same dtype as the input Series.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\reshape.py",
    "ast_data": "FunctionDef name:_unstack_extension_series arg:series arg:level arg:fill_value arg:sort arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
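The behavior this helper guarantees is visible from the public API: unstacking a Series backed by an extension dtype keeps that dtype in every resulting column:

```python
import pandas as pd

idx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"])
ser = pd.Series(pd.array([1, 2, 3, 4], dtype="Int64"), index=idx)

wide = ser.unstack(level="inner")
print(wide.dtypes)  # both columns keep the nullable Int64 extension dtype
```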
  {
    "library": "matplotlib",
    "name": "set_interpolation_stage",
    "source_code": "def set_interpolation_stage(self, s):\n    s = mpl._val_or_rc(s, 'image.interpolation_stage')\n    _api.check_in_list(['data', 'rgba', 'auto'], s=s)\n    self._interpolation_stage = s\n    self.stale = True",
    "docstring": "Set when interpolation happens during the transform to RGBA. Parameters ---------- s : {'data', 'rgba', 'auto'}, default: :rc: Whether to apply resampling interpolation in data or RGBA space. If 'auto', 'rgba' is used if the upsampling rate is less than 3, otherwise 'data' is used.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:set_interpolation_stage arg:self arg:s arguments arg arg Assign Call Call Assign Assign"
  },
  {
    "library": "scipy",
    "name": "_make_diagonal_csr",
    "source_code": "def _make_diagonal_csr(data, is_array=False):\n    from ._csr import csr_array, csr_matrix\n    csr_array = csr_array if is_array else csr_matrix\n    N = len(data)\n    idx_dtype = get_index_dtype(maxval=N)\n    indptr = np.arange(N + 1, dtype=idx_dtype)\n    indices = indptr[:-1]\n    return csr_array((data, indices, indptr), shape=(N, N))",
    "docstring": "build diagonal csc_array/csr_array => self._csr_container Parameter should be a raveled numpy array holding the values on the diagonal of the resulting sparse matrix.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_compressed.py",
    "ast_data": "FunctionDef name:_make_diagonal_csr arg:data arg:is_array arguments arg arg Assign Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
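The indptr/indices construction above is easy to verify by hand; here is the same pattern through the public `scipy.sparse.csr_array` constructor:

```python
import numpy as np
from scipy.sparse import csr_array

data = np.array([3.0, 5.0, 7.0])
N = len(data)
indptr = np.arange(N + 1)   # row i owns exactly one entry ...
indices = indptr[:-1]       # ... located at column i
diag = csr_array((data, indices, indptr), shape=(N, N))
print(diag.toarray())
# [[3. 0. 0.]
#  [0. 5. 0.]
#  [0. 0. 7.]]
```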
  {
    "library": "tensorflow",
    "name": "ones_like",
    "source_code": "@tf_export(v1=['ones_like'])\n@dispatch.register_unary_elementwise_api\n@dispatch.add_dispatch_support\ndef ones_like(tensor, dtype=None, name=None, optimize=True):\n    return ones_like_impl(tensor, dtype, name, optimize)",
    "docstring": "Creates a tensor with all elements set to 1. See also . Given a single tensor (), this operation returns a tensor of the same type and shape as with all elements set to 1. Optionally, you can specify a new type () for the returned tensor. For example: Args: tensor: A . dtype: A type for the returned . Must be , , , , , , , , , or . name: A name for the operation (optional). optimize: if true, attempt to statically determine the shape of 'tensor' and encode it as a constant. Returns: A with all elements set to 1.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:ones_like arg:tensor arg:dtype arg:name arg:optimize arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_fusion_pattern_to_extra_inputs_getter",
    "source_code": "def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> dict[Pattern, Callable]:\n    extra_inputs_getter_mapping: dict[Pattern, Callable] = {}\n    for pattern, config in backend_config._pattern_complex_format_to_config.items():\n        if config._extra_inputs_getter is not None:\n            extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter\n    return extra_inputs_getter_mapping",
    "docstring": "Get a map from fusion pattern to a function that returns extra input nodes from the fusion pattern, in the order required by the root node. This is optional, if not specified, we will not copy over any extra inputs for the root node. Example: # Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d)) # and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra # argument to the fused module, we can unpack the pattern and return the node at # MatchAllNode here # we can implement extra_inputs_getter as follows: def extra_inputs_getter(pattern) -> List[Any]: add, extra_input, conv_pattern = pattern return [extra_input]",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py",
    "ast_data": "FunctionDef name:get_fusion_pattern_to_extra_inputs_getter arg:backend_config arguments arg For Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_recording_summaries",
    "source_code": "def is_recording_summaries():\n    if _summary_state.writer is None:\n        return False\n    if _summary_state.is_recording is None:\n        return False\n    return _summary_state.is_recording",
    "docstring": "Returns non-Tensor boolean indicating if summaries are being recorded.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:is_recording_summaries arguments If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_unpack",
    "source_code": "def _unpack(self, im):\n    im = im[::-1]\n    if im.ndim == 2:\n        return (im, None)\n    else:\n        rgb = im[:, :, :3]\n        rgb = np.array(rgb, order='C')\n        if im.shape[2] == 4:\n            alpha = im[:, :, 3][..., None]\n            if np.all(alpha == 255):\n                alpha = None\n            else:\n                alpha = np.array(alpha, order='C')\n        else:\n            alpha = None\n        return (rgb, alpha)",
    "docstring": "Unpack image array *im* into `` (grayscale or alpha), except that alpha is None if the image is fully opaque.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_unpack arg:self arg:im arguments arg arg Assign If Compare Return return:yes Assign Assign Call If Compare Assign If Call Compare Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_spatial_gradient_kernel3d",
    "source_code": "def get_spatial_gradient_kernel3d(mode: str, order: int, device: Optional[Device]=None, dtype: Optional[Dtype]=None) -> Tensor:\n    KORNIA_CHECK(mode.lower() in {'sobel', 'diff'}, f'Mode should be `sobel` or `diff`. Got {mode}')\n    KORNIA_CHECK(order in {1, 2}, f'Order should be 1 or 2. Got {order}')\n    if mode == 'diff' and order == 1:\n        kernel = get_diff_kernel3d(device=device, dtype=dtype)\n    elif mode == 'diff' and order == 2:\n        kernel = get_diff_kernel3d_2nd_order(device=device, dtype=dtype)\n    else:\n        raise NotImplementedError(f'Not implemented 3d gradient kernel for order {order} on mode {mode}')\n    return kernel",
    "docstring": "Return kernel for 1st or 2nd order scale pyramid gradients. Uses one of the following operators: sobel, diff.",
    "type": "function",
    "file_path": "kornia\\kornia\\filters\\kernels.py",
    "ast_data": "FunctionDef name:get_spatial_gradient_kernel3d arg:mode arg:order arg:device arg:dtype arguments arg arg arg arg Call Compare Call Call Compare If BoolOp Compare Compare Assign Call If BoolOp Compare Compare Assign Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "moving_average_update",
    "source_code": "@doc_controls.do_not_generate_docs\ndef moving_average_update(x, value, momentum):\n    if tf2.enabled():\n        momentum = math_ops.cast(momentum, x.dtype)\n        value = math_ops.cast(value, x.dtype)\n        return x.assign(x * momentum + value * (1 - momentum))\n    else:\n        return moving_averages.assign_moving_average(x, value, momentum, zero_debias=True)",
    "docstring": "Compute the exponential moving average of a value. The moving average 'x' is updated with 'value' following: For example: >>> x = tf.Variable(0.0) >>> momentum=0.9 >>> moving_average_update(x, value = 2.0, momentum=momentum).numpy() >>> x.numpy() 0.2 The result will be biased towards the initial value of the variable. If the variable was initialized to zero, you can divide by to debias it (Section 3 of [Kingma et al., 2015]( >>> num_updates = 1.0 >>> x_zdb = x/(1 - momentum**num_updates) >>> x_zdb.numpy() 2.0 Args: x: A Variable, the moving average. value: A tensor with the same shape as , the new value to be averaged in. momentum: The moving average momentum. Returns: The updated variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:moving_average_update arg:x arg:value arg:momentum arguments arg arg arg If Call Assign Call Assign Call Return return:yes Call Return return:yes Call"
  },
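The update rule and the debiasing step from the docstring, worked through in plain Python so the 0.2 and 2.0 values above are reproducible:

```python
x, momentum, value = 0.0, 0.9, 2.0

x = x * momentum + value * (1 - momentum)
print(x)  # 0.2 -- biased toward the zero initialization

num_updates = 1
print(x / (1 - momentum ** num_updates))  # 2.0 after zero-debiasing
```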
  {
    "library": "tensorflow",
    "name": "variable_dtype",
    "source_code": "@property\ndef variable_dtype(self):\n    return self._variable_dtype",
    "docstring": "The variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicitly chooses a different dtype. If this is different than , Layers will cast variables to the compute dtype to avoid type errors. Variable regularizers are run in the variable dtype, not the compute dtype. Returns: The variable dtype of this policy, as a string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py",
    "ast_data": "FunctionDef name:variable_dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__getitem__",
    "source_code": "def __getitem__(self, position):\n    return self._cells[position]",
    "docstring": "Retrieve a custom cell from a given position.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\table.py",
    "ast_data": "FunctionDef name:__getitem__ arg:self arg:position arguments arg arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "add_target_and_index",
    "source_code": "def add_target_and_index(self, name: ObjDescT, sig: str, signode: desc_signature) -> None:\n    pass",
    "docstring": "Add cross-reference IDs and entries to self.indexnode, if applicable. *name* is whatever :meth: returned.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\directives\\__init__.py",
    "ast_data": "FunctionDef name:add_target_and_index arg:self arg:name arg:sig arg:signode arguments arg arg arg arg"
  },
  {
    "library": "scipy",
    "name": "_call_nearest",
    "source_code": "def _call_nearest(self, x_new):\n    x_new_indices = searchsorted(self.x_bds, x_new, side=self._side)\n    x_new_indices = x_new_indices.clip(0, len(self.x) - 1).astype(intp)\n    y_new = self._y[x_new_indices]\n    return y_new",
    "docstring": "Find nearest neighbor interpolated y_new = f(x_new).",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_call_nearest arg:self arg:x_new arguments arg arg Assign Call Assign Call Call Call Assign Return return:yes"
  },
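`_call_nearest` backs the nearest-neighbor mode of the legacy interpolator; the equivalent public usage:

```python
import numpy as np
from scipy.interpolate import interp1d

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([10.0, 20.0, 30.0, 40.0])

f = interp1d(x, y, kind="nearest")
print(f([0.4, 0.6, 2.9]))  # [10. 20. 40.] -- snaps to the nearest sample
```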
  {
    "library": "pytorch",
    "name": "mask_tensor",
    "source_code": "def mask_tensor(mask: TensorLikeType, t: TensorLikeType):\n    if t.dtype is torch.bool:\n        return mask.logical_and(t)\n    else:\n        return torch.where(mask, t, 0)",
    "docstring": "Similar to torch.where(mask, t, 0) but if t is boolean, result is also boolean and not promoted to int.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:mask_tensor arg:mask arg:t arguments arg arg If Compare Return return:yes Call Return return:yes Call"
  },
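The dtype-preserving behavior described in the docstring, shown on both branches:

```python
import torch

mask = torch.tensor([True, False, True])
t_bool = torch.tensor([True, True, False])
t_int = torch.tensor([5, 6, 7])

# Boolean input stays boolean via logical_and ...
print(mask.logical_and(t_bool))     # tensor([ True, False, False])
# ... while other dtypes take the torch.where(mask, t, 0) path.
print(torch.where(mask, t_int, 0))  # tensor([5, 0, 7])
```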
  {
    "library": "tensorflow",
    "name": "_is_trivial",
    "source_code": "def _is_trivial(node):\n    trivial_node_types = (gast.Name, bool, str, gast.Add, gast.Sub, gast.Mult, gast.Div, gast.Mod, gast.Pow, gast.LShift, gast.RShift, gast.BitOr, gast.BitXor, gast.BitAnd, gast.FloorDiv, gast.Invert, gast.Not, gast.UAdd, gast.USub, gast.Eq, gast.NotEq, gast.Lt, gast.LtE, gast.Gt, gast.GtE, gast.Is, gast.IsNot, gast.In, gast.NotIn, gast.expr_context)\n    if isinstance(node, trivial_node_types) and (not _is_py2_name_constant(node)):\n        return True\n    if gast_util.is_ellipsis(node):\n        return True\n    return False",
    "docstring": "Returns whether to consider the given node 'trivial'. The definition of 'trivial' is a node that can't meaningfully be pulled out into its own assignment statement. This is surprisingly difficult to do robustly across versions of Python and gast, as the parsing of constants has changed, if I may, constantly. Args: node: An AST node to check for triviality Returns: trivial: A Python indicating whether the node is trivial.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\common_transformers\\anf.py",
    "ast_data": "FunctionDef name:_is_trivial arg:node arguments arg Assign If BoolOp Call Call Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):\n    super(InverseTimeDecay, self).__init__()\n    self.initial_learning_rate = initial_learning_rate\n    self.decay_steps = decay_steps\n    self.decay_rate = decay_rate\n    self.staircase = staircase\n    self.name = name",
    "docstring": "Applies inverse time decay to the initial learning rate. Args: initial_learning_rate: A scalar or or a Python number. The initial learning rate. decay_steps: How often to apply decay. decay_rate: A Python number. The decay rate. staircase: Whether to apply decay in a discrete staircase, as opposed to continuous, fashion. name: String. Optional name of the operation. Defaults to 'InverseTimeDecay'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\learning_rate_schedule.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:initial_learning_rate arg:decay_steps arg:decay_rate arg:staircase arg:name arguments arg arg arg arg arg arg Call Call Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "pack_x_y_sample_weight",
    "source_code": "def pack_x_y_sample_weight(x, y=None, sample_weight=None):\n    if y is None:\n        if not nest.is_nested(x):\n            return x\n        else:\n            return (x,)\n    elif sample_weight is None:\n        return (x, y)\n    else:\n        return (x, y, sample_weight)",
    "docstring": "Packs user-provided data into a tuple. This is a convenience utility for packing data into the tuple formats that uses. Standalone usage: >>> x = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x) >>> isinstance(data, tf.Tensor) True >>> y = tf.ones((10, 1)) >>> data = tf.keras.utils.pack_x_y_sample_weight(x, y) >>> isinstance(data, tuple) True >>> x, y = data Args: x: Features to pass to . y: Ground-truth targets to pass to . sample_weight: Sample weight for each element. Returns: Tuple in the format used in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:pack_x_y_sample_weight arg:x arg:y arg:sample_weight arguments arg arg arg If Compare If Call Return return:yes Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_initial_nodes",
    "source_code": "def _initial_nodes(n):\n    fit = 0.49082003 * n - 4.37859653\n    turnover = around(fit).astype(int)\n    ia = arange(1, int(floor(n * 0.5) + 1))\n    ib = ia[::-1]\n    xasq = _initial_nodes_a(n, ia[:turnover + 1])\n    xbsq = _initial_nodes_b(n, ib[turnover + 1:])\n    iv = sqrt(hstack([xasq, xbsq]))\n    if n % 2 == 1:\n        iv = hstack([0.0, iv])\n    return iv",
    "docstring": "Initial guesses for the Hermite roots Computes an initial approximation to the non-negative roots :math: of the Hermite polynomial :math: of order :math:. The Tricomi and Gatteschi initial guesses are used in the region where they are accurate. Parameters ---------- n : int Quadrature order Returns ------- xk : ndarray Approximate roots See Also -------- roots_hermite_asy",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:_initial_nodes arg:n arguments arg Assign Assign Call Call Assign Call Call Call Assign Assign Call Assign Call Assign Call Call If Compare Assign Call Return return:yes"
  },
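For comparison against such initial guesses, NumPy exposes a direct Gauss-Hermite root/weight computation; the non-negative half of its output is what `_initial_nodes` approximates:

```python
import numpy as np

nodes, weights = np.polynomial.hermite.hermgauss(5)
print(nodes)              # symmetric about 0, includes 0.0 for odd order
print(nodes[nodes >= 0])  # the non-negative roots
```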
  {
    "library": "tensorflow",
    "name": "_get_sorted_col_indices",
    "source_code": "def _get_sorted_col_indices(select_columns, column_names):\n    names_to_indices = {n: i for i, n in enumerate(column_names)}\n    num_cols = len(column_names)\n    results = []\n    for v in select_columns:\n        if isinstance(v, int):\n            if v < 0 or v >= num_cols:\n                raise ValueError(f'Column index {v} specified in `select_columns` should be > 0  and <= {num_cols}, which is the number of columns.')\n            results.append(v)\n        elif v not in names_to_indices:\n            raise ValueError(f'Column {v} specified in `select_columns` must be of one of the columns: {names_to_indices.keys()}.')\n        else:\n            results.append(names_to_indices[v])\n    results = sorted(set(results))\n    if len(results) != len(select_columns):\n        sorted_names = sorted(results)\n        duplicate_columns = set([a for a, b in zip(sorted_names[:-1], sorted_names[1:]) if a == b])\n        raise ValueError(f'The `select_columns` argument contains duplicate columns: {duplicate_columns}.')\n    return results",
    "docstring": "Transforms select_columns argument into sorted column indices.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:_get_sorted_col_indices arg:select_columns arg:column_names arguments arg arg Assign Call Assign Call Assign For If Call If BoolOp Compare Compare Raise Call Call If Compare Raise Call Call Call Assign Call Call If Compare Call Call Assign Call Assign Call Call Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "SpatialiteGeometryColumns",
    "source_code": "class SpatialiteGeometryColumns(models.Model):\n    f_table_name = models.CharField(max_length=256)\n    f_geometry_column = models.CharField(max_length=256)\n    coord_dimension = models.IntegerField()\n    srid = models.IntegerField(primary_key=True)\n    spatial_index_enabled = models.IntegerField()\n    type = models.IntegerField(db_column='geometry_type')\n\n    class Meta:\n        app_label = 'gis'\n        db_table = 'geometry_columns'\n        managed = False\n\n    def __str__(self):\n        return '%s.%s - %dD %s field (SRID: %d)' % (self.f_table_name, self.f_geometry_column, self.coord_dimension, self.type, self.srid)\n\n    @classmethod\n    def table_name_col(cls):\n        return 'f_table_name'\n\n    @classmethod\n    def geom_col_name(cls):\n        return 'f_geometry_column'",
    "docstring": "The 'geometry_columns' table from SpatiaLite.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\models.py",
    "ast_data": "ClassDef name:SpatialiteGeometryColumns Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call ClassDef name:Meta Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:table_name_col arg:cls arguments arg Return return:yes FunctionDef name:geom_col_name arg:cls arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Rescale",
    "source_code": "class Rescale(Module):\n\n    def __init__(self, factor: Union[float, Tensor]) -> None:\n        super().__init__()\n        if isinstance(factor, float):\n            self.factor = tensor(factor)\n        else:\n            if not isinstance(factor, Tensor) or factor.ndim != 0:\n                raise TypeError(f'Expected factor to be a float or a 0-d tensor, got {factor}.')\n            self.factor = factor\n\n    def forward(self, input: Tensor) -> Tensor:\n        return input * self.factor",
    "docstring": "Initialize the Rescale operator. Args: factor: The scaling factor. Could be a float or a 0-d tensor.",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\rescale.py",
    "ast_data": "ClassDef name:Rescale FunctionDef name:__init__ arg:self arg:factor arguments arg arg Call Call If Call Assign Call If BoolOp Call Compare Raise Call Assign FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "set_hls_values",
    "source_code": "def set_hls_values(color, h=None, l=None, s=None):\n    rgb = to_rgb(color)\n    vals = list(colorsys.rgb_to_hls(*rgb))\n    for i, val in enumerate([h, l, s]):\n        if val is not None:\n            vals[i] = val\n    rgb = colorsys.hls_to_rgb(*vals)\n    return rgb",
    "docstring": "Independently manipulate the h, l, or s channels of a color. Parameters ---------- color : matplotlib color hex, rgb-tuple, or html color name h, l, s : floats between 0 and 1, or None new values for each channel in hls space Returns ------- new_color : rgb tuple new color code in RGB tuple representation",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:set_hls_values arg:color arg:h arg:l arg:s arguments arg arg arg arg Assign Call Assign Call Call For Call If Compare Assign Assign Call Return return:yes"
  },
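The same channel surgery with just the standard library and matplotlib's color parsing, keeping hue and saturation while raising lightness:

```python
import colorsys
from matplotlib.colors import to_rgb

rgb = to_rgb("#3498db")
h, l, s = colorsys.rgb_to_hls(*rgb)
lighter = colorsys.hls_to_rgb(h, 0.8, s)  # replace only the l channel
print(lighter)
```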
  {
    "library": "pytorch",
    "name": "get_function_overloads",
    "source_code": "def get_function_overloads(self, node: torch.fx.Node) -> list[registration.ONNXFunction]:\n    internal_opname: registration.OpName = self._get_aten_name(node=node)\n    function_group: list[registration.ONNXFunction] | None = None\n    function_group = self.onnx_registry.get_op_functions(namespace=internal_opname.namespace, op_name=internal_opname.op_name, overload=internal_opname.overload)\n    if function_group is None:\n        function_group = self.onnx_registry.get_op_functions(namespace=internal_opname.namespace, op_name=internal_opname.op_name, overload=None)\n        if function_group is not None:\n            op_full_name = internal_opname.qualified_name()\n    if function_group is not None:\n        function_group = self._filter_or_keep_complex(node, function_group)\n        return function_group\n    op_full_name = internal_opname.qualified_name()\n    raise RuntimeError(f'Cannot find symbolic function for {op_full_name}, which should be registered under {node.target}.')",
    "docstring": "Get the function overloads from the registry. Args: node: The node to get the function overloads for. Returns: The list contains ONNXFunctions, starting with the default ones and followed by any custom ones.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py",
    "ast_data": "FunctionDef name:get_function_overloads arg:self arg:node arguments arg arg Call Assign Call If Compare Assign Call If Compare Assign Call If Compare Assign Call Return return:yes Assign Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "rand",
    "source_code": "def rand(sharding_spec: ShardingSpec, *size, dtype=None, layout=torch.strided, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format, process_group=None, init_rrefs=False) -> ShardedTensor:\n    sharded_tensor = ShardedTensor(sharding_spec, *size, dtype=dtype, layout=layout, requires_grad=requires_grad, pin_memory=pin_memory, memory_format=memory_format, process_group=process_group, init_rrefs=init_rrefs)\n    torch.nn.init.uniform_(sharded_tensor, 0, 1)\n    return sharded_tensor",
    "docstring": "Creates a :class: filled with random numbers from a uniform distribution on the interval :math:. The shape of the tensor is defined by the variable argument . Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a list, tuple, or of integers defining the shape of the output tensor. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py",
    "ast_data": "FunctionDef name:rand arg:sharding_spec arguments arg arg arg arg arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "MatlabOpaque",
    "source_code": "class MatlabOpaque(np.ndarray):\n\n    def __new__(cls, input_array):\n        obj = np.asarray(input_array).view(cls)\n        return obj",
    "docstring": "Subclass for a MATLAB opaque matrix. This is a simple subclass of :class: meant to be used by :func: and should not be directly instantiated.",
    "type": "class",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5_params.py",
    "ast_data": "ClassDef name:MatlabOpaque FunctionDef name:__new__ arg:cls arg:input_array arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self._fit_transform(X, compute_sources=False)\n    return self",
    "docstring": "Fit the model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_fastica.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "def fit_transform(self, y):\n    return self.fit(y).transform(y)",
    "docstring": "Fit label binarizer/transform multi-class labels to binary labels. The output of transform is sometimes referred to as the 1-of-K coding scheme. Parameters ---------- y : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Target values. The 2-d matrix should only contain 0 and 1, represents multilabel classification. Sparse matrix can be CSR, CSC, COO, DOK, or LIL. Returns ------- Y : {ndarray, sparse matrix} of shape (n_samples, n_classes) Shape will be (n_samples, 1) for binary problems. Sparse matrix will be of CSR format.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:y arguments arg arg Return return:yes Call Call"
  },
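The 1-of-K coding described in the docstring, on a small multi-class example:

```python
import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
Y = lb.fit_transform(np.array(["cat", "dog", "cat", "bird"]))
print(lb.classes_)  # ['bird' 'cat' 'dog']
print(Y)
# [[0 1 0]
#  [0 0 1]
#  [0 1 0]
#  [1 0 0]]
```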
  {
    "library": "pytorch",
    "name": "get_detector_name",
    "source_code": "def get_detector_name(self) -> str:\n    return 'outlier_detector'",
    "docstring": "Returns the name of this detector",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:get_detector_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_matrix_exp_pade13",
    "source_code": "def _matrix_exp_pade13(matrix):\n    b = [6.476475253248e+16, 3.238237626624e+16, 7771770303897600.0, 1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0, 33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0]\n    b = [constant_op.constant(x, matrix.dtype) for x in b]\n    ident = linalg_ops.eye(array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype)\n    matrix_2 = math_ops.matmul(matrix, matrix)\n    matrix_4 = math_ops.matmul(matrix_2, matrix_2)\n    matrix_6 = math_ops.matmul(matrix_4, matrix_2)\n    tmp_u = math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident\n    matrix_u = math_ops.matmul(matrix, tmp_u)\n    tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2\n    matrix_v = math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident\n    return (matrix_u, matrix_v)",
    "docstring": "13th-order Pade approximant for matrix exponential.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:_matrix_exp_pade13 arg:matrix arguments arg Assign Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(self, name: str, index: Expr, mode: Any=None) -> CSEVariable:\n    return self.create_cse_var(name, bounds=ValueRanges.unknown())",
    "docstring": "Mock load function for memory planning to optimize allocations properly.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:load arg:self arg:name arg:index arg:mode arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, w):\n    return w",
    "docstring": "Applies the constraint to the input weight variable. By default, the inputs weight variable is not modified. Users should override this method to implement their own projection function. Args: w: Input weight variable. Returns: Projected variable (by default, returns unmodified inputs).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:w arguments arg arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_adjust_cat_axis",
    "source_code": "def _adjust_cat_axis(self, ax, axis):\n    if self.var_types[axis] != 'categorical':\n        return\n    if self.plot_data[axis].empty:\n        return\n    n = len(getattr(ax, f'get_{axis}ticks')())\n    if axis == 'x':\n        ax.xaxis.grid(False)\n        ax.set_xlim(-0.5, n - 0.5, auto=None)\n    else:\n        ax.yaxis.grid(False)\n        ax.set_ylim(n - 0.5, -0.5, auto=None)",
    "docstring": "Set ticks and limits for a categorical variable.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\categorical.py",
    "ast_data": "FunctionDef name:_adjust_cat_axis arg:self arg:ax arg:axis arguments arg arg arg If Compare Return return:no If Return return:no Assign Call Call Call If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_next_layer_gather_index",
    "source_code": "def _next_layer_gather_index(bc, original_rp, broadcast_rp):\n    old_value_rowids = array_ops.gather(bc.gather_index, broadcast_rp.value_rowids())\n\n    def gi_no_broadcast():\n        old_row_starts = array_ops.gather(original_rp.row_splits(), old_value_rowids)\n        expected_row_lengths = array_ops.gather(params=original_rp.row_lengths(), indices=bc.gather_index)\n        actual_row_lengths = broadcast_rp.row_lengths()\n        check_valid = check_ops.assert_equal(expected_row_lengths, actual_row_lengths, message='Cannot broadcast')\n        gather_index = old_row_starts + broadcast_rp.offsets_in_rows()\n        return control_flow_ops.with_dependencies([check_valid], gather_index)\n\n    def gi_broadcast():\n        return old_value_rowids\n    if not original_rp.is_uniform():\n        return gi_no_broadcast()\n    do_broadcast = math_ops.equal(original_rp.uniform_row_length(), constant_op.constant(1, original_rp.dtype))\n    gather_index = cond.cond(do_broadcast, true_fn=gi_broadcast, false_fn=gi_no_broadcast)\n    return gather_index",
    "docstring": "Create the next layer gather_index whether or not a broadcast happens. *----------bc-------->* | | original_rp broadcast_rp | | \\|/ \\|/ *--next_broadcaster-->* Args: bc: the old broadcaster. original_rp: the original row partition. broadcast_rp: the target row partition. Returns: the gather_index for next_broadcaster. Raises: InvalidArgumentError if the shapes are incompatible.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_next_layer_gather_index arg:bc arg:original_rp arg:broadcast_rp arguments arg arg arg Assign Call Call FunctionDef name:gi_no_broadcast arguments Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call FunctionDef name:gi_broadcast arguments Return return:yes If Call Return return:yes Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "Reference",
    "source_code": "class Reference(_ObjectIdentityWrapper):\n    __slots__ = ()\n    unwrapped = property()\n\n    def deref(self):\n        return self._wrapped",
    "docstring": "Reference that refers an object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\util\\object_identity.py",
    "ast_data": "ClassDef name:Reference Assign Assign Call FunctionDef name:deref arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "after_request",
    "source_code": "def after_request(self):\n    self.count -= 1",
    "docstring": "Decrement the counter after HTTP request.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\gctools.py",
    "ast_data": "FunctionDef name:after_request arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "_get_ax_legend",
    "source_code": "@final\n@staticmethod\ndef _get_ax_legend(ax: Axes):\n    leg = ax.get_legend()\n    other_ax = getattr(ax, 'left_ax', None) or getattr(ax, 'right_ax', None)\n    other_leg = None\n    if other_ax is not None:\n        other_leg = other_ax.get_legend()\n    if leg is None and other_leg is not None:\n        leg = other_leg\n        ax = other_ax\n    return (ax, leg)",
    "docstring": "Take in axes and return ax and legend under different scenarios",
    "type": "method",
    "file_path": "pandas\\pandas\\plotting\\_matplotlib\\core.py",
    "ast_data": "FunctionDef name:_get_ax_legend arg:ax arguments arg Assign Call Assign BoolOp Call Call Assign If Compare Assign Call If BoolOp Compare Compare Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_node_name",
    "source_code": "def set_node_name(node: torch.fx.Node, new_name: str, name_to_node_cache: dict[str, torch.fx.Node]):\n    node_name_to_set = collections.deque([(node, new_name)])\n    while node_name_to_set:\n        node, new_name = node_name_to_set.pop()\n        if new_name in name_to_node_cache and name_to_node_cache[new_name] != node:\n            base_name, postfix_count = _get_node_base_name(new_name)\n            if postfix_count is None:\n                postfix_count = 0\n            node_name_to_set.append((name_to_node_cache[new_name], f'{base_name}.{postfix_count + 1}'))\n        node.name = new_name\n        name_to_node_cache[new_name] = node",
    "docstring": "Safely set the unique name of a node. If the new name is already taken by another node, the name of the other node will be updated. If is a string of format f\"{base_name}.{count}\", where is an integer, the other node will be renamed as f\"{base_name}.{count+1}\". If not, the other node will be renamed as \"{new_name}.1\". This function will iteratively update the names until there is no conflict. `` is required as an argument to avoid recomputation. The caller is responsible for ensuring the cache is accurate and in sync with the owning module of the node. The values in the cache will be updated accordingly. Args: node: The node to update. new_name: The new name to use. name_to_node_cache: A cache of node names to nodes.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\_utils.py",
    "ast_data": "FunctionDef name:set_node_name arg:node arg:new_name arg:name_to_node_cache arguments arg arg arg Assign Call While Assign Call If BoolOp Compare Compare Assign Call If Compare Assign Call Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_initializing_creator",
    "source_code": "def _initializing_creator(next_creator, **kwargs):\n    v = next_creator(**kwargs)\n    session.run(v.initializer)\n    return v",
    "docstring": "Initialize the save counter if it has been newly created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:_initializing_creator arg:next_creator arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "assign_add",
    "source_code": "def assign_add(self, delta, use_locking=None, name=None, read_value=True):\n    with _handle_graph(self.handle), self._assign_dependencies():\n        assign_add_op = gen_resource_variable_ops.assign_add_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)\n    if read_value:\n        return self._lazy_read(assign_add_op)\n    return assign_add_op",
    "docstring": "Adds a value to this variable. Args: delta: A . The value to add to this variable. use_locking: If , use locking during the operation. name: The name to use for the operation. read_value: A . Whether to read and return the new value of the variable or not. Returns: If is , this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the that does the assignment, and when in eager mode it will return .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:assign_add arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg With Call Call Assign Call Call If Return return:yes Call Return return:yes"
  },
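The `read_value` distinction in eager mode, where the method either returns the new value or skips the read entirely:

```python
import tensorflow as tf

v = tf.Variable(10.0)
print(v.assign_add(2.5).numpy())     # 12.5 -- new value returned by default
v.assign_add(1.0, read_value=False)  # returns None in eager mode
print(v.numpy())                     # 13.5
```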
  {
    "library": "pandas",
    "name": "_getitem_slice",
    "source_code": "def _getitem_slice(self, slobj: slice) -> IntervalIndex:\n    res = self._data[slobj]\n    return type(self)._simple_new(res, name=self._name)",
    "docstring": "Fastpath for __getitem__ when we know we have a slice.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_getitem_slice arg:self arg:slobj arguments arg arg Assign Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "LogisticRegressionBenchmark",
    "source_code": "class LogisticRegressionBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation', 'solver', 'n_jobs']\n    params = (['dense', 'sparse'], ['lbfgs', 'saga'], Benchmark.n_jobs_vals)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, solver, n_jobs = params\n        if Benchmark.data_size == 'large':\n            if representation == 'sparse':\n                data = _20newsgroups_highdim_dataset(n_samples=10000)\n            else:\n                data = _20newsgroups_lowdim_dataset(n_components=1000.0)\n        elif representation == 'sparse':\n            data = _20newsgroups_highdim_dataset(n_samples=2500)\n        else:\n            data = _20newsgroups_lowdim_dataset()\n        return data\n\n    def make_estimator(self, params):\n        representation, solver, n_jobs = params\n        penalty = 'l2' if solver == 'lbfgs' else 'l1'\n        estimator = LogisticRegression(solver=solver, penalty=penalty, tol=0.01, n_jobs=n_jobs, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for LogisticRegression.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\linear_model.py",
    "ast_data": "ClassDef name:LogisticRegressionBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare If Compare Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Compare Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "pandas",
    "name": "_prepare_categoricals",
    "source_code": "def _prepare_categoricals(self, data: DataFrame) -> DataFrame:\n    is_cat = [isinstance(dtype, CategoricalDtype) for dtype in data.dtypes]\n    if not any(is_cat):\n        return data\n    self._has_value_labels |= np.array(is_cat)\n    get_base_missing_value = StataMissingValue.get_base_missing_value\n    data_formatted = []\n    for col, col_is_cat in zip(data, is_cat):\n        if col_is_cat:\n            svl = StataValueLabel(data[col], encoding=self._encoding)\n            self._value_labels.append(svl)\n            dtype = data[col].cat.codes.dtype\n            if dtype == np.int64:\n                raise ValueError('It is not possible to export int64-based categorical data to Stata.')\n            values = data[col].cat.codes._values.copy()\n            if values.max() >= get_base_missing_value(dtype):\n                if dtype == np.int8:\n                    dtype = np.dtype(np.int16)\n                elif dtype == np.int16:\n                    dtype = np.dtype(np.int32)\n                else:\n                    dtype = np.dtype(np.float64)\n                values = np.array(values, dtype=dtype)\n            values[values == -1] = get_base_missing_value(dtype)\n            data_formatted.append((col, values))\n        else:\n            data_formatted.append((col, data[col]))\n    return DataFrame.from_dict(dict(data_formatted))",
    "docstring": "Check for categorical columns, retain categorical information for Stata file and convert categorical data to int",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:_prepare_categoricals arg:self arg:data arguments arg arg Assign Call If Call Return return:yes Call Assign Assign For Call If Assign Call Call Assign If Compare Raise Call Assign Call If Compare Call Call If Compare Assign Call If Compare Assign Call Assign Call Assign Call Assign Compare Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "construct_array_type",
    "source_code": "@classmethod\ndef construct_array_type(cls) -> type_t[PeriodArray]:\n    from pandas.core.arrays import PeriodArray\n    return PeriodArray",
    "docstring": "Return the array type associated with this dtype. Returns ------- type",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:construct_array_type arg:cls arguments arg Return return:yes"
  },
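A brief sketch using public pandas objects; the daily frequency is arbitrary:

```python
import pandas as pd

dtype = pd.PeriodDtype(freq="D")
print(dtype.construct_array_type())
# <class 'pandas.core.arrays.period.PeriodArray'>
```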
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@abc.abstractproperty\ndef element_spec(self):\n    raise NotImplementedError('Iterator.element_spec')",
    "docstring": "The type specification of an element of this iterator. >>> dataset = tf.data.Dataset.from_tensors(42) >>> iterator = iter(dataset) >>> iterator.element_spec tf.TensorSpec(shape=(), dtype=tf.int32, name=None) For more information, read [this guide]( Returns: A (nested) structure of objects matching the structure of an element of this iterator, specifying the type of individual components.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Raise Call"
  },
  {
    "library": "django",
    "name": "iri_to_uri",
    "source_code": "def iri_to_uri(iri):\n    if iri is None:\n        return iri\n    elif isinstance(iri, Promise):\n        iri = str(iri)\n    return quote(iri, safe=\"/#%[]=:;$&()+,!?*@'~\")",
    "docstring": "Convert an Internationalized Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from RFC 3987 Section 3.1, slightly simplified since the input is assumed to be a string rather than an arbitrary byte stream. Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or b'/I â¥ Django/') and return a string containing the encoded result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:iri_to_uri arg:iri arguments arg If Compare Return return:yes If Call Assign Call Return return:yes Call"
  },
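A short sketch of the documented behavior, including the None passthrough visible in the source:

```python
from django.utils.encoding import iri_to_uri

print(iri_to_uri('/I ♥ Django/'))  # '/I%20%E2%99%A5%20Django/'
print(iri_to_uri(None))            # None is returned unchanged
```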
  {
    "library": "scipy",
    "name": "last_group",
    "source_code": "def last_group(self, getter=None):\n    if self.last:\n        return True\n    return self._compare_group(self.item, self.__next__, getter)",
    "docstring": "Returns true if this item is the end of a new group, where groups mean that some attribute has changed. The getter can be None (the item itself changes), an attribute name like ``, a function, or a dict key or list index.",
    "type": "method",
    "file_path": "scipy\\scipy\\_build_utils\\tempita\\_looper.py",
    "ast_data": "FunctionDef name:last_group arg:self arg:getter arguments arg arg If Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "regroup",
    "source_code": "@register.tag\ndef regroup(parser, token):\n    bits = token.split_contents()\n    if len(bits) != 6:\n        raise TemplateSyntaxError(\"'regroup' tag takes five arguments\")\n    target = parser.compile_filter(bits[1])\n    if bits[2] != 'by':\n        raise TemplateSyntaxError(\"second argument to 'regroup' tag must be 'by'\")\n    if bits[4] != 'as':\n        raise TemplateSyntaxError(\"next-to-last argument to 'regroup' tag must be 'as'\")\n    var_name = bits[5]\n    expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3])\n    return RegroupNode(target, expression, var_name)",
    "docstring": "Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that `` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:\"instrument\" by instrument as grouped %}",
    "type": "function",
    "file_path": "django\\django\\template\\defaulttags.py",
    "ast_data": "FunctionDef name:regroup arg:parser arg:token arguments arg arg Assign Call If Compare Call Raise Call Assign Call If Compare Raise Call If Compare Raise Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_tzinfo",
    "source_code": "def _get_tzinfo(tz=None):\n    tz = mpl._val_or_rc(tz, 'timezone')\n    if tz == 'UTC':\n        return UTC\n    if isinstance(tz, str):\n        tzinfo = dateutil.tz.gettz(tz)\n        if tzinfo is None:\n            raise ValueError(f'{tz} is not a valid timezone as parsed by dateutil.tz.gettz.')\n        return tzinfo\n    if isinstance(tz, datetime.tzinfo):\n        return tz\n    raise TypeError(f'tz must be string or tzinfo subclass, not {tz!r}.')",
    "docstring": "Generate from a string or return . If None, retrieve the preferred timezone from the rcParams dictionary.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:_get_tzinfo arg:tz arguments arg Assign Call If Compare Return return:yes If Call Assign Call If Compare Raise Call Return return:yes If Call Return return:yes Raise Call"
  },
  {
    "library": "cryptography",
    "name": "render",
    "source_code": "def render(self, dstbuf: memoryview, pos: int=0) -> int:\n    for frag in self.flist:\n        flen = len(frag)\n        start, pos = (pos, pos + flen)\n        dstbuf[start:pos] = frag\n    return pos",
    "docstring": "Write into bytearray",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:render arg:self arg:dstbuf arg:pos arguments arg arg arg For Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "can_pan",
    "source_code": "def can_pan(self):\n    return True",
    "docstring": "Return whether this Axes supports the pan/zoom button functionality. For a polar Axes, this is slightly misleading. Both panning and zooming are performed by the same button. Panning is performed in azimuth while zooming is done along the radial.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:can_pan arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "restore_variables",
    "source_code": "def restore_variables(self, wrapped, restore_from_saver):\n    if restore_from_saver is not None:\n        initializer, _ = restore_from_saver(constant_op.constant(self._variables_path))\n        if not ops.executing_eagerly_outside_functions():\n            ops.add_to_collection('saved_model_initializers', initializer)\n            one_unlifted = False\n            for variable in wrapped.graph.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES):\n                if variable.graph is wrapped.graph:\n                    one_unlifted = True\n                variable._initializer_op = initializer\n            if one_unlifted:\n                logging.warning('Some variables could not be lifted out of a loaded function. Please run `sess.run(tf.get_collection(\"saved_model_initializers\"))`to restore these variables.')",
    "docstring": "Restores variables from the checkpoint.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_v1_in_v2.py",
    "ast_data": "FunctionDef name:restore_variables arg:self arg:wrapped arg:restore_from_saver arguments arg arg arg If Compare Assign Call Call If Call Call Assign For Call If Compare Assign Assign If Call"
  },
  {
    "library": "django",
    "name": "fields",
    "source_code": "@property\ndef fields(self):\n    return [force_str(capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i)), self.encoding, strings_only=True) for i in range(self.num_fields)]",
    "docstring": "Return a list of fields in the Feature.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py",
    "ast_data": "FunctionDef name:fields arg:self arguments arg Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_RemoveExternalControlEdges",
    "source_code": "def _RemoveExternalControlEdges(self, op: ops.Operation):\n    while_ctxt = self.GetWhileContext()\n    if while_ctxt is None:\n        internal_control_inputs, external_control_inputs = (op.control_inputs, [])\n    else:\n        internal_control_inputs, external_control_inputs = ([], [])\n        for x in op.control_inputs:\n            ctxt = util.GetOutputContext(x)\n            if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:\n                internal_control_inputs.append(x)\n            else:\n                external_control_inputs.append(x)\n    if len(internal_control_inputs) != len(op.control_inputs):\n        op._remove_all_control_inputs()\n        op._add_control_inputs(internal_control_inputs)\n    return (internal_control_inputs, external_control_inputs)",
    "docstring": "Remove any external control dependency on this op.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_RemoveExternalControlEdges arg:self arg:op arguments arg arg Assign Call If Compare Assign Assign For Assign Call If BoolOp Compare Compare Call Call Call If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "finalize",
    "source_code": "def finalize(self, X, y, sample_weight):\n    pass",
    "docstring": "Finalize the solvers results. Some solvers may need this, others not.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py",
    "ast_data": "FunctionDef name:finalize arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sparse, map_op, rank):\n    self._sparse = sparse\n    self._map_op = map_op\n    self._rank = tensor_shape.as_dimension(rank)",
    "docstring": "Create the metadata. Args: sparse: Python boolean. map_op: The that created the in question. This Op contains information about the underlying Map object and the dtype of the original data. rank: The statically known rank of the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\input.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sparse arg:map_op arg:rank arguments arg arg arg arg Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "convert_jax_hlo",
    "source_code": "@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_JAX_HLO)\ndef convert_jax_hlo(input_content, input_names, is_proto_format, **kwargs):\n    model_flags = _model_flags_pb2.ModelFlags()\n    model_flags.use_hlo_import = True\n    if is_proto_format:\n        model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_PROTO\n    else:\n        model_flags.hlo_file_type = _model_flags_pb2.ModelFlags.HLO_TEXT\n    for input_name in input_names:\n        input_array = model_flags.input_arrays.add()\n        input_array.name = input_name\n    conversion_flags = build_conversion_flags(**kwargs)\n    data = convert(model_flags, conversion_flags, input_data_str=input_content, debug_info_str=None)\n    return data",
    "docstring": "Converts a Jax hlo-based model using TFLite converter.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py",
    "ast_data": "FunctionDef name:convert_jax_hlo arg:input_content arg:input_names arg:is_proto_format arguments arg arg arg arg Assign Call Assign If Assign Assign For Assign Call Assign Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, embedding_shape, initializer, weight_collections=None, trainable=True, name=None, **kwargs):\n    super(_EmbeddingColumnLayer, self).__init__(trainable=trainable, name=name, **kwargs)\n    self._embedding_shape = embedding_shape\n    self._initializer = initializer\n    self._weight_collections = weight_collections",
    "docstring": "Constructor. Args: embedding_shape: Shape of the embedding variable used for lookup. initializer: A variable initializer function to be used in embedding variable initialization. weight_collections: A list of collection names to which the Variable will be added. Note that, variables will also be added to collections and . trainable: If also add the variable to the graph collection (see ). name: Name of the layer **kwargs: keyword named properties.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:embedding_shape arg:initializer arg:weight_collections arg:trainable arg:name arguments arg arg arg arg arg arg arg Call Call Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "is_ragged",
    "source_code": "def is_ragged(tensor):\n    return isinstance(tensor, (ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue))",
    "docstring": "Returns true if is a ragged tensor or ragged tensor value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py",
    "ast_data": "FunctionDef name:is_ragged arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Schaffer03",
    "source_code": "class Schaffer03(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.custom_bounds = [(-10, 10), (-10, 10)]\n        self.global_optimum = [[0.0, 1.253115]]\n        self.fglob = 0.00156685\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        num = sin(cos(abs(x[0] ** 2 - x[1] ** 2))) ** 2 - 0.5\n        den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2\n        return 0.5 + num / den",
    "docstring": "Schaffer 3 objective function. This class defines the Schaffer 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer03}}(x) = 0.5 + \\frac{\\sin^2 \\left( \\cos \\lvert x_1^2 - x_2^2 \\rvert \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2} with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schaffer03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "as_widget",
    "source_code": "def as_widget(self, widget=None, attrs=None, only_initial=False):\n    widget = widget or self.field.widget\n    if self.field.localize:\n        widget.is_localized = True\n    attrs = attrs or {}\n    attrs = self.build_widget_attrs(attrs, widget)\n    if self.auto_id and 'id' not in widget.attrs:\n        attrs.setdefault('id', self.html_initial_id if only_initial else self.auto_id)\n    if only_initial and self.html_initial_name in self.form.data:\n        value = self.form._widget_data_value(self.field.hidden_widget(), self.html_initial_name)\n    else:\n        value = self.value()\n    return widget.render(name=self.html_initial_name if only_initial else self.html_name, value=value, attrs=attrs, renderer=self.form.renderer)",
    "docstring": "Render the field by rendering the passed widget, adding any HTML attributes passed as attrs. If a widget isn't specified, use the field's default widget.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:as_widget arg:self arg:widget arg:attrs arg:only_initial arguments arg arg arg arg Assign BoolOp If Assign Assign BoolOp Assign Call If BoolOp Compare Call If BoolOp Compare Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_matmul_2d",
    "source_code": "def _matmul_2d(a, b, **kwargs):\n    ragged_err = 'The matrices in `a` and `b` may not be ragged in their innermost dimension.'\n    checks = []\n    if isinstance(a, ragged_tensor.RaggedTensor):\n        original_size = array_ops.size(a.flat_values)\n        a = a.to_tensor()\n        checks.append(check_ops.assert_equal(original_size, array_ops.size(a), message=ragged_err))\n    if isinstance(b, ragged_tensor.RaggedTensor):\n        original_size = array_ops.size(b.flat_values)\n        b = b.to_tensor()\n        checks.append(check_ops.assert_equal(original_size, array_ops.size(b), message=ragged_err))\n    with ops.control_dependencies(checks):\n        return math_ops.matmul(a, b, **kwargs)",
    "docstring": "Multiplies potentially ragged 2D tensors. Args: a: A 2D Tensor or RaggedTensor with b: A 2D Tensor or RaggedTensor with **kwargs: Additional arguments for (e.g. transpose_a). Returns: A 2D Tensor with .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:_matmul_2d arg:a arg:b arguments arg arg arg Assign Assign If Call Assign Call Assign Call Call Call Call If Call Assign Call Assign Call Call Call Call With Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_asdict",
    "source_code": "def _asdict(self):\n    out = _dict(_zip(self._fields, self))\n    out.update(self.__dict__)\n    return out",
    "docstring": "Return a new dict which maps field names to their values.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_bunch.py",
    "ast_data": "FunctionDef name:_asdict arg:self arguments arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "list_builtin",
    "source_code": "def list_builtin(self, filter_=None):\n    if filter_ == BackendFilter.INTERACTIVE:\n        return [k for k, v in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK.items() if v != 'headless']\n    elif filter_ == BackendFilter.NON_INTERACTIVE:\n        return [k for k, v in self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK.items() if v == 'headless']\n    return [*self._BUILTIN_BACKEND_TO_GUI_FRAMEWORK]",
    "docstring": "Return list of backends that are built into Matplotlib. Parameters ---------- filter_ : , optional Filter to apply to returned backends. For example, to return only non-interactive backends use . Returns ------- list of str Backend names.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py",
    "ast_data": "FunctionDef name:list_builtin arg:self arg:filter_ arguments arg arg If Compare Return return:yes Call Compare If Compare Return return:yes Call Compare Return return:yes"
  },
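A usage sketch against the module-level registry instance (available in recent Matplotlib releases); the exact lists vary by version:

```python
from matplotlib.backends.registry import BackendFilter, backend_registry

print(backend_registry.list_builtin())                           # all built-in backends
print(backend_registry.list_builtin(BackendFilter.INTERACTIVE))  # GUI backends only
```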
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "@_available_if_base_estimator_has('predict_proba')\ndef predict_proba(self, X):\n    return self._get_predictions(X, output_method='predict_proba')",
    "docstring": "Predict probability estimates. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- Y_prob : array-like of shape (n_samples, n_classes) The predicted probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\multioutput.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_skipped_use_sharded_views",
    "source_code": "@property\ndef _skipped_use_sharded_views(self) -> bool:\n    return self._unsharded_flat_param_for_skipped_views is not None",
    "docstring": "This property is used for sharding strategies that do not free after forward with ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py",
    "ast_data": "FunctionDef name:_skipped_use_sharded_views arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "scipy",
    "name": "ihilbert",
    "source_code": "def ihilbert(x, _cache=_cache):\n    if isinstance(_cache, threading.local):\n        if not hasattr(_cache, 'ihilbert_cache'):\n            _cache.ihilbert_cache = {}\n        _cache = _cache.ihilbert_cache\n    return -hilbert(x, _cache)",
    "docstring": "Return inverse Hilbert transform of a periodic sequence x. If `` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*sign(j) * x_j y_0 = 0",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py",
    "ast_data": "FunctionDef name:ihilbert arg:x arg:_cache arguments arg arg If Call If Call Assign Assign Return return:yes Call"
  },
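A quick numerical check of the definition above: since the j = 0 coefficient is zeroed, round-tripping `hilbert` then `ihilbert` recovers the input up to its mean (the test signal here is arbitrary):

```python
import numpy as np
from scipy.fftpack import hilbert, ihilbert

x = np.sin(2 * np.pi * np.arange(16) / 16) + 0.5
y = ihilbert(hilbert(x))
# sign(j)**2 == 1 for j != 0, so only the mean (the j == 0 term) is lost
print(np.allclose(y, x - x.mean()))  # True
```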
  {
    "library": "django",
    "name": "_add_q",
    "source_code": "def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True, summarize=False, update_join_types=True):\n    connector = q_object.connector\n    current_negated ^= q_object.negated\n    branch_negated = branch_negated or q_object.negated\n    target_clause = WhereNode(connector=connector, negated=q_object.negated)\n    joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)\n    for child in q_object.children:\n        child_clause, needed_inner = self.build_filter(child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, summarize=summarize, update_join_types=update_join_types)\n        joinpromoter.add_votes(needed_inner)\n        if child_clause:\n            target_clause.add(child_clause, connector)\n    if update_join_types:\n        needed_inner = joinpromoter.update_join_types(self)\n    else:\n        needed_inner = []\n    return (target_clause, needed_inner)",
    "docstring": "Add a Q-object to the current filter.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:_add_q arg:self arg:q_object arg:used_aliases arg:branch_negated arg:current_negated arg:allow_joins arg:split_subq arg:check_filterable arg:summarize arg:update_join_types arguments arg arg arg arg arg arg arg arg arg arg Assign Assign BoolOp Assign Call Assign Call Call For Assign Call Call If Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "lambda2nu",
    "source_code": "@xp_capabilities()\ndef lambda2nu(lambda_: 'npt.ArrayLike') -> Any:\n    xp = array_namespace(lambda_)\n    return c / _asarray(lambda_, xp=xp, subok=True)",
    "docstring": "Convert wavelength to optical frequency Parameters ---------- lambda_ : array_like Wavelength(s) to be converted. Returns ------- nu : float or array of floats Equivalent optical frequency. Notes ----- Computes `` where c = 299792458.0, i.e., the (vacuum) speed of light in meters/second. Examples -------- >>> from scipy.constants import lambda2nu, speed_of_light >>> import numpy as np >>> lambda2nu(np.array((1, speed_of_light))) array([ 2.99792458e+08, 1.00000000e+00])",
    "type": "function",
    "file_path": "scipy\\scipy\\constants\\_constants.py",
    "ast_data": "FunctionDef name:lambda2nu arg:lambda_ arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "Location",
    "source_code": "class Location(collections.namedtuple('Location', ('filename', 'lineno', 'col_offset'))):\n\n    @property\n    def line_loc(self):\n        return LineLocation(self.filename, self.lineno)",
    "docstring": "Encodes code location information. Attributes: filename: Text lineno: int, 1-based col_offset: int line_loc: LineLocation",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py",
    "ast_data": "ClassDef name:Location Call FunctionDef name:line_loc arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "BlobHessian",
    "source_code": "class BlobHessian(Module):\n\n    def __init__(self, grads_mode: str='sobel') -> None:\n        super().__init__()\n        self.grads_mode: str = grads_mode\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(grads_mode={self.grads_mode})'\n\n    def forward(self, input: Tensor, sigmas: Optional[Tensor]=None) -> Tensor:\n        return hessian_response(input, self.grads_mode, sigmas)",
    "docstring": "Module that calculates Hessian blobs. .. image:: _static/img/hessian_response.png See :func: for details.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\responses.py",
    "ast_data": "ClassDef name:BlobHessian FunctionDef name:__init__ arg:self arg:grads_mode arguments arg arg Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:forward arg:self arg:input arg:sigmas arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_non_traceable_module_classes",
    "source_code": "def set_non_traceable_module_classes(self, module_classes: list[type]) -> PrepareCustomConfig:\n    self.non_traceable_module_classes = module_classes\n    return self",
    "docstring": "Set the modules that are not symbolically traceable, identified by class.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_non_traceable_module_classes arg:self arg:module_classes arguments arg arg Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_NotAnArray",
    "source_code": "class _NotAnArray:\n\n    def __init__(self, data):\n        self.data = np.asarray(data)\n\n    def __array__(self, dtype=None, copy=None):\n        return self.data\n\n    def __array_function__(self, func, types, args, kwargs):\n        if func.__name__ == 'may_share_memory':\n            return True\n        raise TypeError(\"Don't want to call array_function {}!\".format(func.__name__))",
    "docstring": "An object that is convertible to an array. Parameters ---------- data : array-like The data.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "ClassDef name:_NotAnArray FunctionDef name:__init__ arg:self arg:data arguments arg arg Assign Call FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg Return return:yes FunctionDef name:__array_function__ arg:self arg:func arg:types arg:args arg:kwargs arguments arg arg arg arg arg If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ActivityRegularization",
    "source_code": "class ActivityRegularization(Layer):\n\n    def __init__(self, l1=0.0, l2=0.0, **kwargs):\n        super(ActivityRegularization, self).__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n        self.supports_masking = True\n        self.l1 = l1\n        self.l2 = l2\n\n    def compute_output_shape(self, input_shape):\n        return input_shape\n\n    def get_config(self):\n        config = {'l1': self.l1, 'l2': self.l2}\n        base_config = super(ActivityRegularization, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))",
    "docstring": "Layer that applies an update to the cost function based input activity. Args: l1: L1 regularization factor (positive float). l2: L2 regularization factor (positive float). Input shape: Arbitrary. Use the keyword argument (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\core.py",
    "ast_data": "ClassDef name:ActivityRegularization FunctionDef name:__init__ arg:self arg:l1 arg:l2 arguments arg arg arg arg Call Call Call Assign Assign Assign FunctionDef name:compute_output_shape arg:self arg:input_shape arguments arg arg Return return:yes FunctionDef name:get_config arg:self arguments arg Assign Assign Call Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_QueryReference",
    "source_code": "class _QueryReference(nodes.Inline, nodes.TextElement):\n\n    def to_query_string(self):\n        return '&'.join((f'{name}={value}' for name, value in self.attlist()))",
    "docstring": "Wraps a reference or pending reference to add a query string. The query string is generated from the attributes added to this node. Also equivalent to a node.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\roles.py",
    "ast_data": "ClassDef name:_QueryReference FunctionDef name:to_query_string arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_depth",
    "source_code": "def get_depth(self):\n    check_is_fitted(self)\n    return self.tree_.max_depth",
    "docstring": "Return the depth of the decision tree. The depth of a tree is the maximum distance between the root and any leaf. Returns ------- self.tree_.max_depth : int The maximum depth of the tree.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:get_depth arg:self arguments arg Call Return return:yes"
  },
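A minimal sketch on a toy dataset; the resulting depth depends on the data and hyperparameters:

```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
print(clf.get_depth())  # at most 3, the configured maximum
```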
  {
    "library": "pytorch",
    "name": "UnspecializedPythonVariable",
    "source_code": "class UnspecializedPythonVariable(TensorVariable):\n    _nonvar_fields = {'raw_value', 'need_unwrap', *TensorVariable._nonvar_fields}\n\n    def __init__(self, proxy: torch.fx.Proxy, *, raw_value=None, need_unwrap=True, **kwargs) -> None:\n        super().__init__(proxy, **kwargs)\n        self.raw_value = raw_value\n        self.need_unwrap = need_unwrap\n\n    @classmethod\n    def from_tensor_variable(cls, tensor_variable, raw_value, need_unwrap=True):\n        return UnspecializedPythonVariable(**dict(tensor_variable.__dict__), raw_value=raw_value, need_unwrap=need_unwrap)",
    "docstring": "This is a 1-element tensor represents unspecialized python float/int.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\tensor.py",
    "ast_data": "ClassDef name:UnspecializedPythonVariable Assign FunctionDef name:__init__ arg:self arg:proxy arguments arg arg arg arg arg Call Call Assign Assign FunctionDef name:from_tensor_variable arg:cls arg:tensor_variable arg:raw_value arg:need_unwrap arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "attr_value_proto",
    "source_code": "def attr_value_proto(dtype, shape, s):\n    attr = {}\n    if s is not None:\n        attr['attr'] = AttrValue(s=s.encode(encoding='utf_8'))\n    if shape is not None:\n        shapeproto = tensor_shape_proto(shape)\n        attr['_output_shapes'] = AttrValue(list=AttrValue.ListValue(shape=[shapeproto]))\n    return attr",
    "docstring": "Create a dict of objects matching a NodeDef's attr field. Follows specifically designed for a NodeDef. The values have been reverse engineered from standard TensorBoard logged data.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_proto_graph.py",
    "ast_data": "FunctionDef name:attr_value_proto arg:dtype arg:shape arg:s arguments arg arg arg Assign If Compare Assign Call Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "efficientvit_backbone_b1",
    "source_code": "def efficientvit_backbone_b1(**kwargs: dict[str, Any]) -> EfficientViTBackbone:\n    backbone = EfficientViTBackbone(width_list=[16, 32, 64, 128, 256], depth_list=[1, 2, 3, 3, 4], dim=16, **build_kwargs_from_config(kwargs, EfficientViTBackbone))\n    return backbone",
    "docstring": "Create EfficientViT B1.",
    "type": "function",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\backbone.py",
    "ast_data": "FunctionDef name:efficientvit_backbone_b1 arguments arg Assign Call Call Return return:yes"
  },
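A hedged instantiation sketch; the import path is inferred from the file path above and may differ between kornia versions:

```python
import torch
# Assumed import path, based on kornia/contrib/models/efficient_vit/backbone.py
from kornia.contrib.models.efficient_vit.backbone import efficientvit_backbone_b1

backbone = efficientvit_backbone_b1()
features = backbone(torch.randn(1, 3, 224, 224))  # multi-stage features from the B1 backbone
```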
  {
    "library": "pytorch",
    "name": "make_traced",
    "source_code": "def make_traced(fn: Callable[P, T]) -> Callable[P, T]:\n\n    def _traced(*args: P.args, **kwargs: P.kwargs) -> T:\n        executor = str(kwargs.pop('executor', 'aten'))\n        wrapped, all_args = wrapper_and_args_for_make_fx(fn, args, kwargs)\n        with TorchRefsMode():\n            gm = make_fx(wrapped)(all_args)\n        return execute(gm, all_args, executor=executor)\n    return _traced",
    "docstring": "Returns a function that, when called, will trace its torch operations to prims and then execute those prims on the requested trace executor (possibly lowering them to that trace executor first). Only supports the torch operations defined in _torch_to_reference_map in context.py and operations with positional args. All args must be tensors. In the near future all these restrictions will be lifted. Example usage: def foo(a, b): return torch.add(a, b) traced_foo = make_traced(foo) a = torch.randn((1, 2, 3, 4, 5), device='cuda') b = torch.randn((1, 2, 3, 4, 5), device='cuda') result = traced_foo(a, b, executor='aten')",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\executor.py",
    "ast_data": "FunctionDef name:make_traced arg:fn arguments arg FunctionDef name:_traced arguments arg arg Assign Call Call Assign Call With Call Assign Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_userinfo_encrypted_response_enc",
    "source_code": "def validate_userinfo_encrypted_response_enc(self):\n    if self.get('userinfo_encrypted_response_enc') and (not self.get('userinfo_encrypted_response_alg')):\n        raise InvalidClaimError('userinfo_encrypted_response_enc')\n    if self.get('userinfo_encrypted_response_alg'):\n        self.setdefault('userinfo_encrypted_response_enc', 'A128CBC-HS256')\n    self._validate_claim_value('userinfo_encrypted_response_enc')",
    "docstring": "JWE enc algorithm [JWA] REQUIRED for encrypting UserInfo Responses. If userinfo_encrypted_response_alg is specified, the default userinfo_encrypted_response_enc value is A128CBC-HS256. When userinfo_encrypted_response_enc is included, userinfo_encrypted_response_alg MUST also be provided.",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\registration\\claims.py",
    "ast_data": "FunctionDef name:validate_userinfo_encrypted_response_enc arg:self arguments arg If BoolOp Call Call Raise Call If Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_metadata_routing",
    "source_code": "def get_metadata_routing(self):\n    router = MetadataRouter(owner=self.__class__.__name__).add(estimator=self.estimator, method_mapping=MethodMapping().add(callee='fit', caller='fit'))\n    return router",
    "docstring": "Get metadata routing of this object. Please check :ref: on how the routing mechanism works. .. versionadded:: 1.5 Returns ------- routing : MetadataRouter A :class: encapsulating routing information.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py",
    "ast_data": "FunctionDef name:get_metadata_routing arg:self arguments arg Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__del__",
    "source_code": "def __del__(self):\n    self._timer_stop()",
    "docstring": "Need to stop timer and possibly disconnect timer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:__del__ arg:self arguments arg Call"
  },
  {
    "library": "scipy",
    "name": "_cumulative_simpson_unequal_intervals",
    "source_code": "def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:\n    x21 = dx[..., :-1]\n    x32 = dx[..., 1:]\n    f1 = y[..., :-2]\n    f2 = y[..., 1:-1]\n    f3 = y[..., 2:]\n    x31 = x21 + x32\n    x21_x31 = x21 / x31\n    x21_x32 = x21 / x32\n    x21x21_x31x32 = x21_x31 * x21_x32\n    coeff1 = 3 - x21_x31\n    coeff2 = 3 + x21x21_x31x32 + x21_x31\n    coeff3 = -x21x21_x31x32\n    return x21 / 6 * (coeff1 * f1 + coeff2 * f2 + coeff3 * f3)",
    "docstring": "Calculate the Simpson integrals for all h1 intervals assuming unequal interval widths. The function can also be used to calculate the integral for all h2 intervals by reversing the inputs, and .",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_quadrature.py",
    "ast_data": "FunctionDef name:_cumulative_simpson_unequal_intervals arg:y arg:dx arguments arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "denormalize_pixel_coordinates",
    "source_code": "def denormalize_pixel_coordinates(pixel_coordinates: Tensor, height: int, width: int, eps: float=1e-08) -> Tensor:\n    if pixel_coordinates.shape[-1] != 2:\n        raise ValueError(f'Input pixel_coordinates must be of shape (*, 2). Got {pixel_coordinates.shape}')\n    hw: Tensor = stack([tensor(width), tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)\n    factor: Tensor = tensor(2.0) / (hw - 1).clamp(eps)\n    return tensor(1.0) / factor * (pixel_coordinates + 1)",
    "docstring": "Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates: the normalized grid coordinates. Shape can be :math:. width: the maximum width in the x-axis. height: the maximum height in the y-axis. eps: safe division by zero. Return: the denormalized pixel coordinates with shape :math:. Examples: >>> coords = tensor([[-1., -1.]]) >>> denormalize_pixel_coordinates(coords, 100, 50) tensor([[0., 0.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:denormalize_pixel_coordinates arg:pixel_coordinates arg:height arg:width arg:eps arguments arg arg arg arg If Compare Raise Call Call Call Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_result",
    "source_code": "def set_result(self, result: T) -> None:\n    super().set_result(result)",
    "docstring": "Set the result for this ``. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> import threading >>> import time >>> def slow_set_future(fut, value): ... time.sleep(0.5) ... fut.set_result(value) >>> fut = torch.futures.Future() >>> t = threading.Thread( ... target=slow_set_future, ... args=(fut, torch.ones(2) * 3) ... ) >>> t.start() >>> print(fut.wait()) tensor([3., 3.]) >>> t.join()",
    "type": "method",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:set_result arg:self arg:result arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "list_backends",
    "source_code": "def list_backends(exclude_tags=('debug', 'experimental')) -> list[str]:\n    import torch._dynamo\n    return torch._dynamo.list_backends(exclude_tags)",
    "docstring": "Return valid strings that can be passed to . Args: exclude_tags(optional): A tuple of strings representing tags to exclude.",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:list_backends arg:exclude_tags arguments arg Return return:yes Call"
  },
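A usage sketch; the returned list depends on the installed build and any registered extensions:

```python
import torch

print(torch.compiler.list_backends())  # typically includes 'inductor'
```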
  {
    "library": "pandas",
    "name": "_build_doc",
    "source_code": "def _build_doc(self):\n    raise AbstractMethodError(self)",
    "docstring": "Return a tree-like object that can be used to iterate over the DOM. Returns ------- node-like The DOM from which to parse the table element.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_build_doc arg:self arguments arg Raise Call"
  },
  {
    "library": "cherrypy",
    "name": "__call__",
    "source_code": "def __call__(self):\n    raise self",
    "docstring": "Use this exception as a request.handler (raise self).",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cperror.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Raise"
  },
  {
    "library": "authlib",
    "name": "is_expired",
    "source_code": "def is_expired(self):\n    raise NotImplementedError()",
    "docstring": "A method to define if this token is expired. For instance, there is a column `` in the table:: def is_expired(self): return self.expired_at < now :return: boolean",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:is_expired arg:self arguments arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "argrelmin",
    "source_code": "def argrelmin(data, axis=0, order=1, mode='clip'):\n    return argrelextrema(data, np.less, axis, order, mode)",
    "docstring": "Calculate the relative minima of . Parameters ---------- data : ndarray Array in which to find the relative minima. axis : int, optional Axis over which to select from . Default is 0. order : int, optional How many points on each side to use for the comparison to consider `kdatadataargrelextremadatafind_peaksdata`. .. versionadded:: 0.11.0 Examples -------- >>> import numpy as np >>> from scipy.signal import argrelmin >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelmin(x) (array([1, 5]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... >>> argrelmin(y, axis=1) (array([0, 2]), array([2, 1]))",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_peak_finding.py",
    "ast_data": "FunctionDef name:argrelmin arg:data arg:axis arg:order arg:mode arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "z",
    "source_code": "@z.setter\ndef z(self, value):\n    if not self.hasz:\n        raise GEOSException('Cannot set Z on 2D Point.')\n    self._cs.setOrdinate(2, 0, value)",
    "docstring": "Set the Z component of the Point.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\point.py",
    "ast_data": "FunctionDef name:z arg:self arg:value arguments arg arg If Raise Call Call"
  },
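A short sketch (requires the GEOS library that GeoDjango wraps); coordinates are arbitrary:

```python
from django.contrib.gis.geos import Point

p = Point(1.0, 2.0, 3.0)  # 3D point, so hasz is True
p.z = 10.0                # setting Z succeeds
# On Point(1.0, 2.0), assigning z raises GEOSException('Cannot set Z on 2D Point.')
```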
  {
    "library": "authlib",
    "name": "introspect_token",
    "source_code": "def introspect_token(self, url, token=None, token_type_hint=None, body=None, auth=None, headers=None, **kwargs):\n    if auth is None:\n        auth = self.client_auth(self.token_endpoint_auth_method)\n    return self._handle_token_hint('introspect_token_request', url, token=token, token_type_hint=token_type_hint, body=body, auth=auth, headers=headers, **kwargs)",
    "docstring": "Implementation of OAuth 2.0 Token Introspection defined via _. :param url: Introspection Endpoint, must be HTTPS. :param token: The token to be introspected. :param token_type_hint: The type of the token that to be revoked. It can be \"access_token\" or \"refresh_token\". :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by requests. :param headers: Dict to default request headers with. :return: Introspection Response .. _:",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\client.py",
    "ast_data": "FunctionDef name:introspect_token arg:self arg:url arg:token arg:token_type_hint arg:body arg:auth arg:headers arguments arg arg arg arg arg arg arg arg If Compare Assign Call Return return:yes Call"
  },
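A hedged usage sketch with Authlib's requests integration, which mixes in this client method; the endpoint URL, credentials, and token are placeholders:

```python
from authlib.integrations.requests_client import OAuth2Session

client = OAuth2Session('client_id', 'client_secret',
                       token_endpoint_auth_method='client_secret_basic')
resp = client.introspect_token(
    'https://provider.example/oauth/introspect',  # must be HTTPS
    token='some-access-token',
    token_type_hint='access_token',
)
print(resp.json())
```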
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell):\n    self._cell = cell",
    "docstring": "Creates a new CounterCell. Args: cell: A c pointer of TFE_MonitoringCounterCell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "capture_begin",
    "source_code": "def capture_begin(self, pool=None, capture_error_mode='global'):\n    super().capture_begin(pool=pool, capture_error_mode=capture_error_mode)",
    "docstring": "Begin capturing CUDA work on the current stream. Typically, you shouldn't call `~torch.cuda.graph~torch.cuda.make_graphed_callables~torch.cuda.graph_pool_handleother_Graph_instance.pool()Graph memory managementcudaStreamCaptureMode `_",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\graphs.py",
    "ast_data": "FunctionDef name:capture_begin arg:self arg:pool arg:capture_error_mode arguments arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "get_next_month",
    "source_code": "def get_next_month(self, date):\n    return _get_next_prev(self, date, is_previous=False, period='month')",
    "docstring": "Get the next valid month.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_next_month arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "read_header",
    "source_code": "def read_header(self):\n    data = read_dtype(self.mat_stream, self.dtypes['header'])\n    name = self.mat_stream.read(int(data['namlen'])).strip(b'\\x00')\n    if data['mopt'] < 0 or data['mopt'] > 5000:\n        raise ValueError('Mat 4 mopt wrong format, byteswapping problem?')\n    M, rest = divmod(data['mopt'], 1000)\n    if M not in (0, 1):\n        warnings.warn(f\"We do not support byte ordering '{order_codes[M]}'; returned data may be corrupt\", UserWarning, stacklevel=3)\n    O, rest = divmod(rest, 100)\n    if O != 0:\n        raise ValueError('O in MOPT integer should be 0, wrong format?')\n    P, rest = divmod(rest, 10)\n    T = rest\n    dims = (data['mrows'], data['ncols'])\n    is_complex = data['imagf'] == 1\n    dtype = self.dtypes[P]\n    return VarHeader4(name, dtype, T, dims, is_complex)",
    "docstring": "Read and return header for variable",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py",
    "ast_data": "FunctionDef name:read_header arg:self arguments arg Assign Call Assign Call Call Call If BoolOp Compare Compare Raise Call Assign Call If Compare Call Assign Call If Compare Raise Call Assign Call Assign Assign Assign Compare Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_num_elements",
    "source_code": "def _num_elements(losses):\n    with ops.name_scope(None, 'num_elements', values=[losses]) as scope:\n        return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)",
    "docstring": "Computes the number of elements in tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py",
    "ast_data": "FunctionDef name:_num_elements arg:losses arguments arg With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ismodule",
    "source_code": "def ismodule(object):\n    return _inspect.ismodule(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.ismodule.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py",
    "ast_data": "FunctionDef name:ismodule arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_params",
    "source_code": "def get_params(self, deep=True):\n    params = dict(kernel=self.kernel, exponent=self.exponent)\n    if deep:\n        deep_items = self.kernel.get_params().items()\n        params.update((('kernel__' + k, val) for k, val in deep_items))\n    return params",
    "docstring": "Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py",
    "ast_data": "FunctionDef name:get_params arg:self arg:deep arguments arg arg Assign Call If Assign Call Call Call Return return:yes"
  },
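A small sketch with an exponentiated kernel, which carries the `kernel` and `exponent` parameters this method exposes; the `kernel__` prefix comes from the deep expansion:

```python
from sklearn.gaussian_process.kernels import RBF

kernel = RBF(length_scale=2.0) ** 3  # an Exponentiation kernel
print(kernel.get_params(deep=True))
# contains 'kernel', 'exponent', and the nested 'kernel__length_scale'
```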
  {
    "library": "tensorflow",
    "name": "from_saved_model",
    "source_code": "@classmethod\n@_deprecation.deprecated(None, 'Use `lite.TFLiteConverter.from_saved_model` instead.')\ndef from_saved_model(cls, saved_model_dir, input_arrays=None, input_shapes=None, output_arrays=None, tag_set=None, signature_key=None):\n    return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays, input_shapes, output_arrays, tag_set, signature_key)",
    "docstring": "Creates a TocoConverter class from a SavedModel.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:from_saved_model arg:cls arg:saved_model_dir arg:input_arrays arg:input_shapes arg:output_arrays arg:tag_set arg:signature_key arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "minimize",
    "source_code": "def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None):\n    grads_and_vars = self._compute_gradients(loss, var_list=var_list, grad_loss=grad_loss, tape=tape)\n    return self.apply_gradients(grads_and_vars, name=name)",
    "docstring": "Minimize by updating . This method simply computes gradient using and calls . If you want to process the gradient before applying then call and explicitly instead of using this function. Args: loss: or callable. If a callable, should take no arguments and return the value to minimize. If a , the argument must be passed. var_list: list or tuple of objects to update to minimize , or a callable returning the list or tuple of objects. Use callable when the variable list would otherwise be incomplete before since the variables are created at the first time is called. grad_loss: (Optional). A holding the gradient computed for . name: (Optional) str. Name for the returned operation. tape: (Optional) . If is provided as a , the tape that computed the must be provided. Returns: An that updates the variables in . The will be automatically increased by 1. Raises: ValueError: If some of the variables are not objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:minimize arg:self arg:loss arg:var_list arg:grad_loss arg:name arg:tape arguments arg arg arg arg arg arg Assign Call Return return:yes Call"
  },
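A minimal sketch with a callable loss (so gradients are taped internally and no `tape` argument is needed), using the TF 2.x Keras SGD optimizer built on this base class:

```python
import tensorflow as tf

var = tf.Variable(2.0)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
opt.minimize(lambda: var ** 2, var_list=[var])
print(var.numpy())  # 1.6, i.e. 2.0 - 0.1 * d(v**2)/dv at v=2.0
```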
  {
    "library": "matplotlib",
    "name": "ToolHome",
    "source_code": "class ToolHome(ViewsPositionsBase):\n    description = 'Reset original view'\n    image = 'mpl-data/images/home'\n    default_keymap = property(lambda self: mpl.rcParams['keymap.home'])\n    _on_trigger = 'home'",
    "docstring": "Restore the original view limits.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "ClassDef name:ToolHome Assign Assign Assign Call arguments arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_powerSGD_comm_hook_wrapper",
    "source_code": "def _powerSGD_comm_hook_wrapper(comm_hook, model, state, matrix_approximation_rank, start_powerSGD_iter=1000):\n    powerSGD_state = powerSGD.PowerSGDState(process_group=state, matrix_approximation_rank=matrix_approximation_rank, start_powerSGD_iter=start_powerSGD_iter)\n    model.register_comm_hook(powerSGD_state, comm_hook)",
    "docstring": "Wrap PowerSGD communication hook. To be consistent with the wrappers of other DDP comm hooks, the input state only needs to be a process group, which will be wrapped up with other state info.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\__init__.py",
    "ast_data": "FunctionDef name:_powerSGD_comm_hook_wrapper arg:comm_hook arg:model arg:state arg:matrix_approximation_rank arg:start_powerSGD_iter arguments arg arg arg arg arg Assign Call Call"
  },
  {
    "library": "django",
    "name": "_format_lazy",
    "source_code": "def _format_lazy(format_string, *args, **kwargs):\n    return format_string.format(*args, **kwargs)",
    "docstring": "Apply str.format() on 'format_string' where format_string, args, and/or kwargs might be lazy.",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:_format_lazy arg:format_string arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_copy_over_q_dq_args",
    "source_code": "def _copy_over_q_dq_args(original_node: Node, replacement_node: Node):\n    assert original_node.target == replacement_node.target\n    if original_node.target in (torch.ops.quantized_decomposed.quantize_per_tensor.default, torch.ops.quantized_decomposed.dequantize_per_tensor.default):\n        start_copy_arg_index = 1\n    elif original_node.target in (torch.ops.quantized_decomposed.quantize_per_channel.default, torch.ops.quantized_decomposed.dequantize_per_channel.default):\n        start_copy_arg_index = 3\n    else:\n        raise ValueError(f\"Expected quantize/dequantize nodes, got '{original_node.target}'\")\n    replacement_node.args = replacement_node.args[:start_copy_arg_index] + original_node.args[start_copy_arg_index:]",
    "docstring": "Given a pair of quantize or dequantize nodes, copy over all literal args from the original node to the replacement node.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_copy_over_q_dq_args arg:original_node arg:replacement_node arguments arg arg Compare If Compare Assign If Compare Assign Raise Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, obj):\n    if isinstance(obj, type):\n        return self._decorate_class(obj)\n    elif isinstance(obj, property):\n        return self._decorate_property(obj)\n    else:\n        return self._decorate_fun(obj)",
    "docstring": "Call method Parameters ---------- obj : object",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\deprecation.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:obj arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "golden",
    "source_code": "def golden(func, args=(), brack=None, tol=_epsilon, full_output=0, maxiter=5000):\n    options = {'xtol': tol, 'maxiter': maxiter}\n    res = _minimize_scalar_golden(func, brack, args, **options)\n    if full_output:\n        return (res['x'], res['fun'], res['nfev'])\n    else:\n        return res['x']",
    "docstring": "Return the minimizer of a function of one variable using the golden section method. Given a function of one variable and a possible bracketing interval, return a minimizer of the function isolated to a fractional precision of tol. Parameters ---------- func : callable func(x,*args) Objective function to minimize. args : tuple, optional Additional arguments (if present), passed to func. brack : tuple, optional Either a triple ``xa >> def f(x): ... return (x-1)**2 >>> from scipy import optimize >>> minimizer = optimize.golden(f, brack=(1, 2)) >>> minimizer 1 >>> res = optimize.golden(f, brack=(-1, 0.5, 2), full_output=True) >>> xmin, fval, funcalls = res >>> f(xmin), fval (9.925165290385052e-18, 9.925165290385052e-18)",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:golden arg:func arg:args arg:brack arg:tol arg:full_output arg:maxiter arguments arg arg arg arg arg arg Assign Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "stroke",
    "source_code": "def stroke(self):\n    return self._linewidth > 0 and self._alpha > 0 and (len(self._rgb) <= 3 or self._rgb[3] != 0.0)",
    "docstring": "Predicate: does the path need to be stroked (its outline drawn)? This tests for the various conditions that disable stroking the path, in which case it would presumably be filled.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:stroke arg:self arguments arg Return return:yes BoolOp Compare Compare BoolOp Compare Call Compare"
  },
  {
    "library": "django",
    "name": "sym_difference",
    "source_code": "def sym_difference(self, other):\n    return self._topology(capi.geos_symdifference(self.ptr, other.ptr))",
    "docstring": "Return a set combining the points in this Geometry not in other, and the points in other not in this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:sym_difference arg:self arg:other arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "scrapy",
    "name": "failure_to_exc_info",
    "source_code": "def failure_to_exc_info(failure: Failure) -> tuple[type[BaseException], BaseException, TracebackType | None] | None:\n    if isinstance(failure, Failure):\n        assert failure.type\n        assert failure.value\n        return (failure.type, failure.value, cast(Optional[TracebackType], failure.getTracebackObject()))\n    return None",
    "docstring": "Extract exc_info from Failure instances",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\log.py",
    "ast_data": "FunctionDef name:failure_to_exc_info arg:failure arguments arg If Call Return return:yes Call Call Return return:no"
  },
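A short sketch of how the helper above bridges Twisted failures into stdlib logging; the ZeroDivisionError here is just a stand-in:

```python
import logging
from twisted.python.failure import Failure
from scrapy.utils.log import failure_to_exc_info

try:
    1 / 0
except ZeroDivisionError:
    failure = Failure()  # captures the active exception

# Hand the (type, value, traceback) tuple straight to stdlib logging.
logging.error('Task failed', exc_info=failure_to_exc_info(failure))
```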
  {
    "library": "tensorflow",
    "name": "restore_op",
    "source_code": "def restore_op(self, filename_tensor, saveable, preferred_shard):\n    tensors = []\n    for spec in saveable.specs:\n        tensors.append(io_ops.restore_v2(filename_tensor, [spec.name], [spec.slice_spec], [spec.dtype])[0])\n    return tensors",
    "docstring": "Create ops to restore 'saveable'. This is intended to be overridden by subclasses that want to generate different Ops. Args: filename_tensor: String Tensor. saveable: A BaseSaverBuilder.SaveableObject object. preferred_shard: Int. Shard to open first when loading a sharded file. Returns: A list of Tensors resulting from reading 'saveable' from 'filename'.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:restore_op arg:self arg:filename_tensor arg:saveable arg:preferred_shard arguments arg arg arg arg Assign For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_num_workers",
    "source_code": "def _get_num_workers(cluster_spec):\n    if not cluster_spec:\n        return 0\n    return len(cluster_spec.as_dict().get(_TaskType.WORKER, [])) + len(cluster_spec.as_dict().get(_TaskType.CHIEF, []))",
    "docstring": "Gets number of workers including chief.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_coordinator.py",
    "ast_data": "FunctionDef name:_get_num_workers arg:cluster_spec arguments arg If Return return:yes Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_opt_einsum",
    "source_code": "def get_opt_einsum() -> Any:\n    return _opt_einsum",
    "docstring": "Return the opt_einsum package if opt_einsum is currently available, else None.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\opt_einsum\\__init__.py",
    "ast_data": "FunctionDef name:get_opt_einsum arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_as_indexed_slices",
    "source_code": "@dispatch.add_dispatch_support\ndef _as_indexed_slices(x, optimize=True):\n    if not isinstance(x, (tensor_lib.Tensor, indexed_slices.IndexedSlices)):\n        raise TypeError(f'Not a Tensor or IndexedSlices: {type(x)}.')\n    if isinstance(x, indexed_slices.IndexedSlices):\n        return x\n    x_shape = array_ops.shape_internal(x, optimize=optimize)\n    return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape)",
    "docstring": "Convert 'x' to IndexedSlices. Convert a dense Tensor to a block-sparse IndexedSlices. Args: x: Either a Tensor object, or an IndexedSlices object. optimize: if true, attempt to optimize the conversion of 'x'. Returns: An IndexedSlices object. Raises: TypeError: If 'x' is not a Tensor or an IndexedSlices object.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:_as_indexed_slices arg:x arg:optimize arguments arg arg If Call Raise Call Call If Call Return return:yes Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "batch_norm_with_global_normalization_v2",
    "source_code": "@tf_export('nn.batch_norm_with_global_normalization', v1=[])\n@dispatch.add_dispatch_support\ndef batch_norm_with_global_normalization_v2(input, mean, variance, beta, gamma, variance_epsilon, scale_after_normalization, name=None):\n    return batch_norm_with_global_normalization(t=input, m=mean, v=variance, beta=beta, gamma=gamma, variance_epsilon=variance_epsilon, scale_after_normalization=scale_after_normalization, name=name)",
    "docstring": "Batch normalization. This op is deprecated. See . Args: input: A 4D input Tensor. mean: A 1D mean Tensor with size matching the last dimension of t. This is the first output from tf.nn.moments, or a saved moving average thereof. variance: A 1D variance Tensor with size matching the last dimension of t. This is the second output from tf.nn.moments, or a saved moving average thereof. beta: A 1D beta Tensor with size matching the last dimension of t. An offset to be added to the normalized tensor. gamma: A 1D gamma Tensor with size matching the last dimension of t. If \"scale_after_normalization\" is true, this tensor will be multiplied with the normalized tensor. variance_epsilon: A small float number to avoid dividing by 0. scale_after_normalization: A bool indicating whether the resulted tensor needs to be multiplied with gamma. name: A name for this operation (optional). Returns: A batch-normalized . References: Batch Normalization - Accelerating Deep Network Training by Reducing Internal Covariate Shift: [Ioffe et al., 2015]( ([pdf](",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py",
    "ast_data": "FunctionDef name:batch_norm_with_global_normalization_v2 arg:input arg:mean arg:variance arg:beta arg:gamma arg:variance_epsilon arg:scale_after_normalization arg:name arguments arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_position_3d",
    "source_code": "def get_position_3d(self):\n    return (self._x, self._y, self._z)",
    "docstring": "Return the (x, y, z) position of the text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:get_position_3d arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_buffer",
    "source_code": "def get_buffer(program: 'ExportedProgram', node: torch.fx.Node) -> Optional[torch.Tensor]:\n    if is_buffer(program, node):\n        buffer_name = program.graph_signature.inputs_to_buffers[node.name]\n        if buffer_name in program.graph_signature.non_persistent_buffers:\n            return program.constants[buffer_name]\n        else:\n            return program.state_dict[buffer_name]\n    return None",
    "docstring": "Returns the buffer associated with the given node in the exported program. Returns None if the node is not a buffer within the exported program",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\utils.py",
    "ast_data": "FunctionDef name:get_buffer arg:program arg:node arguments arg arg If Call Assign If Compare Return return:yes Return return:yes Return return:no"
  },
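A hedged sketch of looking up a buffer from an exported program, assuming a PyTorch 2.x `torch.export` build; the module `M` is illustrative:

```python
import torch
from torch._export.utils import get_buffer, is_buffer

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer('scale', torch.ones(3))

    def forward(self, x):
        return x * self.scale

ep = torch.export.export(M(), (torch.randn(3),))
for node in ep.graph.nodes:
    # Placeholder nodes that feed buffers resolve through the signature.
    if node.op == 'placeholder' and is_buffer(ep, node):
        print(node.name, get_buffer(ep, node))
```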
  {
    "library": "cherrypy",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, k, v):\n    dict.__setitem__(self, k, v)\n    self.namespaces({k: v})",
    "docstring": "Assign a config setting.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:k arg:v arguments arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "get_field_size",
    "source_code": "def get_field_size(name):\n    m = field_size_re.search(name)\n    return int(m[1]) if m else None",
    "docstring": "Extract the size number from a \"varchar(11)\" type name",
    "type": "function",
    "file_path": "django\\django\\db\\backends\\sqlite3\\introspection.py",
    "ast_data": "FunctionDef name:get_field_size arg:name arguments arg Assign Call Return return:yes Call"
  },
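A standalone sketch of the same extraction; the regex here is an illustrative approximation of Django's `field_size_re`, not a verbatim copy:

```python
import re

# Approximation of Django's pattern for sizes like "varchar(11)".
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', re.IGNORECASE)

def get_field_size(name):
    m = field_size_re.search(name)
    return int(m[1]) if m else None

print(get_field_size('varchar(11)'))  # 11
print(get_field_size('text'))         # None
```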
  {
    "library": "pandas",
    "name": "_maybe_memory_map",
    "source_code": "def _maybe_memory_map(handle: str | BaseBuffer, memory_map: bool) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:\n    handles: list[BaseBuffer] = []\n    memory_map &= hasattr(handle, 'fileno') or isinstance(handle, str)\n    if not memory_map:\n        return (handle, memory_map, handles)\n    handle = cast(ReadCsvBuffer, handle)\n    if isinstance(handle, str):\n        handle = open(handle, 'rb')\n        handles.append(handle)\n    try:\n        wrapped = _IOWrapper(mmap.mmap(handle.fileno(), 0, access=mmap.ACCESS_READ))\n    finally:\n        for handle in reversed(handles):\n            handle.close()\n    return (wrapped, memory_map, [wrapped])",
    "docstring": "Try to memory map file/buffer.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:_maybe_memory_map arg:handle arg:memory_map arguments arg arg BoolOp Call Call If Return return:yes Assign Call If Call Assign Call Call Try Assign Call Call Call For Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "string_width_height",
    "source_code": "def string_width_height(self, s):\n    if not len(s):\n        return (0, 0)\n    total_width = 0\n    namelast = None\n    miny = 1000000000.0\n    maxy = 0\n    for c in s:\n        if c == '\\n':\n            continue\n        wx, name, bbox = self._metrics[ord(c)]\n        total_width += wx + self._kern.get((namelast, name), 0)\n        l, b, w, h = bbox\n        miny = min(miny, b)\n        maxy = max(maxy, b + h)\n        namelast = name\n    return (total_width, maxy - miny)",
    "docstring": "Return the string width (including kerning) and string height as a (*w*, *h*) tuple.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:string_width_height arg:self arg:s arguments arg arg If Call Return return:yes Assign Assign Assign Assign For If Compare Assign Call Call Assign Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cond_v2",
    "source_code": "def cond_v2(pred, true_fn, false_fn, name='cond'):\n    if isinstance(pred, bool):\n        raise TypeError('pred must not be a Python bool', pred)\n    if not name:\n        name = 'cond'\n    with ops.name_scope(name) as scope:\n        true_name = util.unique_fn_name(scope, 'true')\n        false_name = util.unique_fn_name(scope, 'false')\n        add_control_dependencies = ops.get_default_graph()._add_control_dependencies\n        pred = _normalize_pred(pred)\n        true_graph = func_graph_module.func_graph_from_py_func(true_name, true_fn, [], {}, func_graph=util.CondBranchFuncGraph(true_name, collections=ops.get_default_graph()._collections), add_control_dependencies=add_control_dependencies, op_return_value=pred)\n        false_graph = func_graph_module.func_graph_from_py_func(false_name, false_fn, [], {}, func_graph=util.CondBranchFuncGraph(false_name, collections=ops.get_default_graph()._collections), add_control_dependencies=add_control_dependencies, op_return_value=pred)\n        verify_captures(_COND, [true_graph, false_graph])\n        return _build_cond(pred, true_graph, false_graph, true_graph.external_captures, false_graph.external_captures, building_gradient=False, name=scope)",
    "docstring": "Like tf.cond, except emits a single If op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:cond_v2 arg:pred arg:true_fn arg:false_fn arg:name arguments arg arg arg arg If Call Raise Call If Assign With Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "write_report_proto",
    "source_code": "def write_report_proto(self, report_path, report_proto, tt_parameters):\n    gfile.MakeDirs(tt_parameters.trace_dir)\n    with gfile.GFile(report_path, 'wb') as f:\n        f.write(report_proto.SerializeToString())",
    "docstring": "Writes the given report proto under trace_dir.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "FunctionDef name:write_report_proto arg:self arg:report_path arg:report_proto arg:tt_parameters arguments arg arg arg arg Call With Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "summary",
    "source_code": "def summary(self, line_length=None, positions=None, print_fn=None):\n    if not self.built:\n        raise ValueError('This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.')\n    layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn)",
    "docstring": "Prints a string summary of the network. Args: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to . print_fn: Print function to use. Defaults to . It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. Raises: ValueError: if is called before the model is built.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:summary arg:self arg:line_length arg:positions arg:print_fn arguments arg arg arg arg If Raise Call Call"
  },
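A small usage sketch showing the `print_fn` hook, which is the documented way to capture the summary as text:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])

lines = []
model.summary(print_fn=lines.append)  # each summary line is appended
print('\n'.join(lines))
```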
  {
    "library": "pytorch",
    "name": "deco_binary_ufunc",
    "source_code": "def deco_binary_ufunc(torch_func):\n\n    @normalizer\n    def wrapped(x1: ArrayLikeOrScalar, x2: ArrayLikeOrScalar, /, out: Optional[OutArray]=None, *, where: NotImplementedType=True, casting: Optional[CastingModes]='same_kind', order: NotImplementedType='K', dtype: Optional[DTypeLike]=None, subok: NotImplementedType=False, signature: NotImplementedType=None, extobj: NotImplementedType=None):\n        if dtype is not None:\n\n            def cast(x, dtype):\n                if isinstance(x, torch.Tensor):\n                    return _util.typecast_tensor(x, dtype, casting)\n                else:\n                    return torch.as_tensor(x, dtype=dtype)\n            x1 = cast(x1, dtype)\n            x2 = cast(x2, dtype)\n        elif isinstance(x1, torch.Tensor) and isinstance(x2, torch.Tensor):\n            dtype = _dtypes_impl.result_type_impl(x1, x2)\n            x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)\n        else:\n            x1, x2 = _dtypes_impl.nep50_to_tensors(x1, x2, torch_func.__name__ in NEP50_FUNCS, torch_func.__name__)\n        result = torch_func(x1, x2)\n        return _ufunc_postprocess(result, out, casting)\n    wrapped.__qualname__ = torch_func.__name__\n    wrapped.__name__ = torch_func.__name__\n    return wrapped",
    "docstring": "Common infra for binary ufuncs. Normalize arguments, sort out type casting, broadcasting and delegate to the pytorch functions for the actual work.",
    "type": "function",
    "file_path": "pytorch\\torch\\_numpy\\_ufuncs.py",
    "ast_data": "FunctionDef name:deco_binary_ufunc arg:torch_func arguments arg FunctionDef name:wrapped arg:out arguments arg arg arg arg arg arg arg arg arg arg If Compare FunctionDef name:cast arg:x arg:dtype arguments arg arg If Call Return return:yes Call Return return:yes Call Assign Call Assign Call If BoolOp Call Call Assign Call Assign Call Assign Call Compare Assign Call Return return:yes Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_dispatch_kl",
    "source_code": "def _dispatch_kl(type_p, type_q):\n    matches = [(super_p, super_q) for super_p, super_q in _KL_REGISTRY if issubclass(type_p, super_p) and issubclass(type_q, super_q)]\n    if not matches:\n        return NotImplemented\n    left_p, left_q = min((_Match(*m) for m in matches)).types\n    right_q, right_p = min((_Match(*reversed(m)) for m in matches)).types\n    left_fun = _KL_REGISTRY[left_p, left_q]\n    right_fun = _KL_REGISTRY[right_p, right_q]\n    if left_fun is not right_fun:\n        warnings.warn(f'Ambiguous kl_divergence({type_p.__name__}, {type_q.__name__}). Please register_kl({left_p.__name__}, {right_q.__name__})', RuntimeWarning)\n    return left_fun",
    "docstring": "Find the most specific approximate match, assuming single inheritance.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\kl.py",
    "ast_data": "FunctionDef name:_dispatch_kl arg:type_p arg:type_q arguments arg arg Assign BoolOp Call Call If Return return:yes Assign Call Call Assign Call Call Call Assign Assign If Compare Call Return return:yes"
  },
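`_dispatch_kl` backs the public `kl_divergence` entry point; a minimal sketch of the dispatch in action, with illustrative Normal distributions:

```python
import torch
from torch.distributions import Normal
from torch.distributions.kl import kl_divergence

p = Normal(torch.tensor(0.0), torch.tensor(1.0))
q = Normal(torch.tensor(1.0), torch.tensor(2.0))

# Dispatch resolves to the registered (Normal, Normal) implementation.
print(kl_divergence(p, q))
```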
  {
    "library": "scipy",
    "name": "G",
    "source_code": "def G(w):\n    return abs(k / prod(1j * w - p))",
    "docstring": "Gain of filter",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:G arg:w arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "InvalidParameterError",
    "source_code": "class InvalidParameterError(ValueError, TypeError):\n    pass",
    "docstring": "Custom exception to be raised when the parameter of a class/method/function does not have a valid type or value.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:InvalidParameterError"
  },
  {
    "library": "pytorch",
    "name": "expires",
    "source_code": "@contextmanager\ndef expires(after: float, scope: Optional[str]=None, client: Optional[TimerClient]=None):\n    if client is None:\n        if _timer_client is None:\n            raise RuntimeError('Configure timer client before using countdown timers.')\n        client = _timer_client\n    if scope is None:\n        caller = getframeinfo(stack()[1][0])\n        scope = f'{caller.filename}#{caller.lineno}'\n    expiration = time.time() + after\n    client.acquire(scope, expiration)\n    try:\n        yield\n    finally:\n        client.release(scope)",
    "docstring": "Acquires a countdown timer that expires in `` that the client talks to will ultimately make the decision when and how to reap the workers with expired timers. Usage:: torch.distributed.elastic.timer.configure(LocalTimerClient()) with expires(after=10): torch.distributed.all_reduce(...)",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "FunctionDef name:expires arg:after arg:scope arg:client arguments arg arg arg If Compare If Compare Raise Call Assign If Compare Assign Call Call Assign Assign Call Call Try Call"
  },
  {
    "library": "pytorch",
    "name": "std",
    "source_code": "@_apply_docstring_templates\ndef std(input: Union[Tensor, MaskedTensor], dim: DimOrDims=None, unbiased: Optional[bool]=None, *, correction: Optional[int]=None, keepdim: Optional[bool]=False, dtype: Optional[DType]=None, mask: Optional[Tensor]=None) -> Tensor:\n    return _std_var(input=input, dim=dim, unbiased=unbiased, correction_opt=correction, keepdim=keepdim, dtype=dtype, mask=mask, take_sqrt=True)",
    "docstring": "{reduction_signature} {reduction_descr} The identity value of sample standard deviation operation is undefined. The elements of output tensor with strided layout, that correspond to fully masked-out elements, have `` values. {reduction_args} {reduction_example}",
    "type": "function",
    "file_path": "pytorch\\torch\\masked\\_ops.py",
    "ast_data": "FunctionDef name:std arg:input arg:dim arg:unbiased arguments arg arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "revert_all_patches",
    "source_code": "def revert_all_patches(self):\n    for patch in self.patches_made:\n        patch.revert()\n    return self.patches_made",
    "docstring": "Remove all the stored patcheds. It doesn't modify patches_made.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py",
    "ast_data": "FunctionDef name:revert_all_patches arg:self arguments arg For Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_standalone_module_class",
    "source_code": "def set_standalone_module_class(self, module_class: type, qconfig_mapping: Optional[QConfigMapping], example_inputs: tuple[Any, ...], prepare_custom_config: Optional[PrepareCustomConfig], backend_config: Optional[BackendConfig]) -> PrepareCustomConfig:\n    self.standalone_module_classes[module_class] = StandaloneModuleConfigEntry(qconfig_mapping, example_inputs, prepare_custom_config, backend_config)\n    return self",
    "docstring": "Set the configuration for running a standalone module identified by `` will be used instead.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:set_standalone_module_class arg:self arg:module_class arg:qconfig_mapping arg:example_inputs arg:prepare_custom_config arg:backend_config arguments arg arg arg arg arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_FunctionWrapper",
    "source_code": "class _FunctionWrapper:\n\n    def __init__(self, f, args):\n        self.f = f\n        self.args = [] if args is None else args\n\n    def __call__(self, x):\n        return self.f(x, *self.args)",
    "docstring": "Object to wrap user's function, allowing picklability",
    "type": "class",
    "file_path": "scipy\\scipy\\_lib\\_util.py",
    "ast_data": "ClassDef name:_FunctionWrapper FunctionDef name:__init__ arg:self arg:f arg:args arguments arg arg arg Assign Assign Compare FunctionDef name:__call__ arg:self arg:x arguments arg arg Return return:yes Call"
  },
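A self-contained sketch of why the wrapper above exists: unlike a bound lambda, an instance of this class round-trips through pickle. `shifted_square` is illustrative:

```python
import pickle

class _FunctionWrapper:
    def __init__(self, f, args):
        self.f = f
        self.args = [] if args is None else args

    def __call__(self, x):
        return self.f(x, *self.args)

def shifted_square(x, c):
    return (x - c) ** 2

w = _FunctionWrapper(shifted_square, (1.0,))
w2 = pickle.loads(pickle.dumps(w))  # a lambda closure would fail here
print(w2(3.0))  # 4.0
```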
  {
    "library": "pandas",
    "name": "memory_usage_string",
    "source_code": "@property\ndef memory_usage_string(self) -> str:\n    return f'{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\\n'",
    "docstring": "Memory usage in a form of human readable string.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:memory_usage_string arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "all_py_loaded_overloads",
    "source_code": "def all_py_loaded_overloads() -> Iterator[torch._ops.OpOverload]:\n    for ns in torch.ops:\n        packets = getattr(torch.ops, ns)\n        for op_name in packets:\n            packet = getattr(packets, op_name)\n            for overload in packet:\n                yield getattr(packet, overload)",
    "docstring": "Warning: the set of overloads this will report is very subtle. It is precisely the set of torch.ops functions that have actually been accessed from Python (e.g., we actually called torch.ops.aten.blah at some point. This is DIFFERENT from the set of registered operators, which will in general be a larger set, as this would include all operators which we ran C++ static initializers or Python operator registration on. This does not eagerly populate the list on torch.ops.aten; this list is lazy! In other words, this is good for traversing over everything that has an OpOverload object allocated in Python. We use it for cache invalidation, but don't rely on this list being complete. Note that even if we did report all C++ registered overloads, this isn't guaranteed to be complete either, as a subsequent lazy load of a library which triggers more registrations could add more things to the set.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dispatch\\python.py",
    "ast_data": "FunctionDef name:all_py_loaded_overloads arguments For Assign Call For Assign Call For Call"
  },
  {
    "library": "pytorch",
    "name": "allocator",
    "source_code": "@property\ndef allocator(self) -> Optional[_cuda_CUDAAllocator]:\n    return super().allocator",
    "docstring": "Returns the allocator this MemPool routes allocations to.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:allocator arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_LowerCholesky",
    "source_code": "class _LowerCholesky(Constraint):\n    event_dim = 2\n\n    def check(self, value):\n        value_tril = value.tril()\n        lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]\n        positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]\n        return lower_triangular & positive_diagonal",
    "docstring": "Constrain to lower-triangular square matrices with positive diagonals.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\constraints.py",
    "ast_data": "ClassDef name:_LowerCholesky Assign FunctionDef name:check arg:self arg:value arguments arg arg Assign Call Assign Call Call Compare Assign Call Compare Call Return return:yes"
  },
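A quick check against the public singleton `constraints.lower_cholesky`, which is an instance of the class above; the matrix is illustrative:

```python
import torch
from torch.distributions import constraints

# Lower-triangular with a strictly positive diagonal.
L = torch.tril(torch.ones(3, 3)) + torch.eye(3)
print(constraints.lower_cholesky.check(L))  # tensor(True)
```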
  {
    "library": "tensorflow",
    "name": "finalized",
    "source_code": "@property\ndef finalized(self) -> bool:\n    return self._finalized",
    "docstring": "True if this graph has been finalized.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:finalized arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    return self._decision_function(X)",
    "docstring": "Predict using the linear model. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- ndarray of shape (n_samples,) Predicted target values per element in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Return return:yes Call"
  },
  {
    "library": "virtualenv",
    "name": "add_parser_arguments",
    "source_code": "@classmethod\ndef add_parser_arguments(cls, parser, interpreter):\n    pass",
    "docstring": "Add CLI arguments for this activation script. :param parser: the CLI parser :param interpreter: the interpreter this virtual environment is based of",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\activation\\activator.py",
    "ast_data": "FunctionDef name:add_parser_arguments arg:cls arg:parser arg:interpreter arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "_validate_usecols_arg",
    "source_code": "def _validate_usecols_arg(usecols):\n    msg = \"'usecols' must either be list-like of all strings, all unicode, all integers or a callable.\"\n    if usecols is not None:\n        if callable(usecols):\n            return (usecols, None)\n        if not is_list_like(usecols):\n            raise ValueError(msg)\n        usecols_dtype = lib.infer_dtype(usecols, skipna=False)\n        if usecols_dtype not in ('empty', 'integer', 'string'):\n            raise ValueError(msg)\n        usecols = set(usecols)\n        return (usecols, usecols_dtype)\n    return (usecols, None)",
    "docstring": "Validate the 'usecols' parameter. Checks whether or not the 'usecols' parameter contains all integers (column selection by index), strings (column by name) or is a callable. Raises a ValueError if that is not the case. Parameters ---------- usecols : list-like, callable, or None List of columns to use when parsing or a callable that can be used to filter a list of table columns. Returns ------- usecols_tuple : tuple A tuple of (verified_usecols, usecols_dtype). 'verified_usecols' is either a set if an array-like is passed in or 'usecols' if a callable or None is passed in. 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like is passed in or None if a callable or None is passed in.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py",
    "ast_data": "FunctionDef name:_validate_usecols_arg arg:usecols arguments arg Assign If Compare If Call Return return:yes If Call Raise Call Assign Call If Compare Raise Call Assign Call Return return:yes Return return:yes"
  },
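The validator above underlies the `usecols` argument of `pandas.read_csv`; a short sketch of the three accepted forms, with inline data:

```python
from io import StringIO
import pandas as pd

csv = 'a,b,c\n1,2,3\n4,5,6\n'

# usecols accepts column names, integer positions, or a callable filter.
print(pd.read_csv(StringIO(csv), usecols=['a', 'c']))
print(pd.read_csv(StringIO(csv), usecols=[0, 2]))
print(pd.read_csv(StringIO(csv), usecols=lambda col: col != 'b'))
```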
  {
    "library": "authlib",
    "name": "validate_client_name",
    "source_code": "def validate_client_name(self):\n    pass",
    "docstring": "Human-readable string name of the client to be presented to the end-user during authorization. If omitted, the authorization server MAY display the raw \"client_id\" value to the end-user instead. It is RECOMMENDED that clients always send this field. The value of this field MAY be internationalized, as described in Section 2.2.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_client_name arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "sort_vertices_of_regions",
    "source_code": "def sort_vertices_of_regions(self):\n    if self._dim != 3:\n        raise TypeError('Only supported for three-dimensional point sets')\n    _voronoi.sort_vertices_of_regions(self._simplices, self.regions)",
    "docstring": "Sort indices of the vertices to be (counter-)clockwise ordered. Raises ------ TypeError If the points are not three-dimensional. Notes ----- For each region in regions, it sorts the indices of the Voronoi vertices such that the resulting points are in a clockwise or counterclockwise order around the generator point. This is done as follows: Recall that the n-th region in regions surrounds the n-th generator in points and that the k-th Voronoi vertex in vertices is the circumcenter of the k-th triangle in self._simplices. For each region n, we choose the first triangle (=Voronoi vertex) in self._simplices and a vertex of that triangle not equal to the center n. These determine a unique neighbor of that triangle, which is then chosen as the second triangle. The second triangle will have a unique vertex not equal to the current vertex or the center. This determines a unique neighbor of the second triangle, which is then chosen as the third triangle and so forth. We proceed through all the triangles (=Voronoi vertices) belonging to the generator in points and obtain a sorted version of the vertices of its surrounding region.",
    "type": "method",
    "file_path": "scipy\\scipy\\spatial\\_spherical_voronoi.py",
    "ast_data": "FunctionDef name:sort_vertices_of_regions arg:self arguments arg If Compare Raise Call Call"
  },
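A minimal sketch of calling the sorter on a `SphericalVoronoi` diagram; the random points on the unit sphere are illustrative:

```python
import numpy as np
from scipy.spatial import SphericalVoronoi

rng = np.random.default_rng(0)
points = rng.normal(size=(20, 3))
points /= np.linalg.norm(points, axis=1, keepdims=True)  # unit sphere

sv = SphericalVoronoi(points)
sv.sort_vertices_of_regions()  # region vertex indices now cyclically ordered
```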
  {
    "library": "pandas",
    "name": "_make_table_cell_attributes",
    "source_code": "def _make_table_cell_attributes(self, cell: ExcelCell) -> dict[str, int | str]:\n    attributes: dict[str, int | str] = {}\n    style_name = self._process_style(cell.style)\n    if style_name is not None:\n        attributes['stylename'] = style_name\n    if cell.mergestart is not None and cell.mergeend is not None:\n        attributes['numberrowsspanned'] = max(1, cell.mergestart)\n        attributes['numbercolumnsspanned'] = cell.mergeend\n    return attributes",
    "docstring": "Convert cell attributes to OpenDocument attributes Parameters ---------- cell : ExcelCell Spreadsheet cell data Returns ------- attributes : Dict[str, Union[int, str]] Dictionary with attributes and attribute values",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odswriter.py",
    "ast_data": "FunctionDef name:_make_table_cell_attributes arg:self arg:cell arguments arg arg Assign Call If Compare Assign If BoolOp Compare Compare Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "as_dict",
    "source_code": "def as_dict(self, is_private=False, **params):\n    return {'keys': [k.as_dict(is_private, **params) for k in self.keys]}",
    "docstring": "Represent this key as a dict of the JSON Web Key Set.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\key_set.py",
    "ast_data": "FunctionDef name:as_dict arg:self arg:is_private arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "cudart",
    "source_code": "def cudart():\n    _lazy_init()\n    return _cudart",
    "docstring": "Retrieves the CUDA runtime API module. This function initializes the CUDA runtime environment if it is not already initialized and returns the CUDA runtime API module (_cudart). The CUDA runtime API module provides access to various CUDA runtime functions. Args: `trace_name.prof--profile-from-start offcudaProfilerStart--csv--print-summary-o-f` option forces the overwrite of the output file if it already exists.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:cudart arguments Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "gen_ops",
    "source_code": "def gen_ops() -> dict[Any, Any]:\n    arch = get_cuda_arch()\n    version = get_cuda_version()\n    return _gen_ops_cached(arch, version)",
    "docstring": "Generates all supported CUTLASS operations.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cutlass_utils.py",
    "ast_data": "FunctionDef name:gen_ops arguments Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "min_max_axis",
    "source_code": "def min_max_axis(X, axis, ignore_nan=False):\n    if sp.issparse(X) and X.format in ('csr', 'csc'):\n        if ignore_nan:\n            return _sparse_nan_min_max(X, axis=axis)\n        else:\n            return _sparse_min_max(X, axis=axis)\n    else:\n        _raise_typeerror(X)",
    "docstring": "Compute minimum and maximum along an axis on a CSR or CSC matrix. Optionally ignore NaN values. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Input data. It should be of CSR or CSC format. axis : {0, 1} Axis along which the axis should be computed. ignore_nan : bool, default=False Ignore or passing through NaN values. .. versionadded:: 0.20 Returns ------- mins : ndarray of shape (n_features,), dtype={np.float32, np.float64} Feature-wise minima. maxs : ndarray of shape (n_features,), dtype={np.float32, np.float64} Feature-wise maxima.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:min_max_axis arg:X arg:axis arg:ignore_nan arguments arg arg arg If BoolOp Call Compare If Return return:yes Call Return return:yes Call Call"
  },
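A short usage sketch on a CSR matrix; the values are illustrative:

```python
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import min_max_axis

X = sp.csr_matrix(np.array([[0.0, 2.0], [3.0, 1.0]]))
mins, maxs = min_max_axis(X, axis=0)
print(mins, maxs)  # [0. 1.] [3. 2.]
```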
  {
    "library": "scikit-learn",
    "name": "iter_leaves",
    "source_code": "def iter_leaves(self):\n    if self.left is None:\n        yield self\n    else:\n        yield from self.left.iter_leaves()\n        yield from self.right.iter_leaves()",
    "docstring": "Iterate over all the cluster leaves in the tree.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py",
    "ast_data": "FunctionDef name:iter_leaves arg:self arguments arg If Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "register_codec",
    "source_code": "def register_codec(x):\n    _codecs.append(x)",
    "docstring": "Registers a codec to use for encoding/decoding. Args: x: The codec object to register. The object must implement can_encode, do_encode, can_decode, and do_decode. See the various _*Codec classes for examples.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "FunctionDef name:register_codec arg:x arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    self._check_overlap_initialized()\n    for index, value in state_dict['state'].items():\n        param = self._index_to_param[index]\n        if self._param_to_rank[param] != self.rank:\n            state_dict['state'][index] = None\n        else:\n            self.optim.state[param] = _recursive_copy_to_device(value, non_blocking=True, device=param.device)\n            for state_name, state_value in self.optim.state[param].items():\n                if torch.is_tensor(state_value) and state_value.dim() == 0:\n                    self.optim.state[param][state_name] = state_value.cpu()\n    super().load_state_dict(state_dict)\n    self._sync_param_groups(state_dict['param_groups'], self.param_groups)\n    self._sync_param_groups(self.param_groups, self.optim.param_groups)",
    "docstring": "Load the state pertaining to the given rank from the input `state_dictZeroRedundancyOptimizerDistributedDataParallel` gradient buckets have been rebuilt.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Call For Call Assign If Compare Assign Assign Call For Call If BoolOp Call Compare Call Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "op_reg_gen",
    "source_code": "def op_reg_gen(func):\n    op_reg_code, _ = OpRegGen().transform(func, None)\n    return op_reg_code",
    "docstring": "Parse a function and emit the TFR functions.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\tfr\\python\\op_reg_gen.py",
    "ast_data": "FunctionDef name:op_reg_gen arg:func arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "last",
    "source_code": "def last(x: Series):\n    arr = x.array[notna(x.array)]\n    if not len(arr):\n        return x.array.dtype.na_value\n    return arr[-1]",
    "docstring": "Helper function for last item that isn't NA.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:last arg:x arguments arg Assign Call If Call Return return:yes Return return:yes"
  },
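The helper above backs `GroupBy.last`; a tiny sketch of the observable behavior, with illustrative data:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'g': ['a', 'a', 'b'], 'v': [1.0, np.nan, np.nan]})
# The trailing non-NA value per group; an all-NA group yields NA.
print(df.groupby('g')['v'].last())
```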
  {
    "library": "pytorch",
    "name": "PerRow",
    "source_code": "class PerRow(Granularity):\n    pass",
    "docstring": "Represents row-wise granularity in quantization. This is a special case of per-axis quantization and is unique to Float8 matmuls where the input is quantized with a block_size of (1, ..., input.shape[-1]). And the weight is quantized with a block_size of (1, weight.shape[1]).",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\quantization\\observer.py",
    "ast_data": "ClassDef name:PerRow"
  },
  {
    "library": "tensorflow",
    "name": "to_structured_signature",
    "source_code": "def to_structured_signature(function_type: FunctionType) -> Tuple[Any, Any]:\n\n    def to_signature(x_type):\n        if x_type is None:\n            raise TypeError(f'Can not generate structured signature if FunctionType is not fully specified. Received {function_type}')\n        return x_type.placeholder_value(trace_type.InternalPlaceholderContext(unnest_only=True))\n    args_signature = []\n    kwargs_signature = {}\n    for p in function_type.parameters.values():\n        if p.kind == Parameter.POSITIONAL_ONLY:\n            args_signature.append(to_signature(p.type_constraint))\n        else:\n            kwargs_signature[p.name] = to_signature(p.type_constraint)\n    input_signature = (tuple(args_signature), kwargs_signature)\n    output_signature = to_signature(function_type.output)\n    return (input_signature, output_signature)",
    "docstring": "Returns structured input and output signatures from a FunctionType.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:to_structured_signature arg:function_type arguments arg FunctionDef name:to_signature arg:x_type arguments arg If Compare Raise Call Return return:yes Call Call Assign Assign For Call If Compare Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "TimerRequest",
    "source_code": "class TimerRequest:\n    __slots__ = ['worker_id', 'scope_id', 'expiration_time']\n\n    def __init__(self, worker_id: Any, scope_id: str, expiration_time: float):\n        self.worker_id = worker_id\n        self.scope_id = scope_id\n        self.expiration_time = expiration_time\n\n    def __eq__(self, other):\n        if isinstance(other, TimerRequest):\n            return self.worker_id == other.worker_id and self.scope_id == other.scope_id and (self.expiration_time == other.expiration_time)\n        return False",
    "docstring": "Data object representing a countdown timer acquisition and release that is used between the `` is implementation specific. It is whatever the TimerServer and TimerClient implementations have on to uniquely identify a worker.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py",
    "ast_data": "ClassDef name:TimerRequest Assign FunctionDef name:__init__ arg:self arg:worker_id arg:scope_id arg:expiration_time arguments arg arg arg arg Assign Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Return return:yes"
  },
  {
    "library": "django",
    "name": "sanitize_file_name",
    "source_code": "def sanitize_file_name(self, file_name):\n    file_name = html.unescape(file_name)\n    file_name = file_name.rsplit('/')[-1]\n    file_name = file_name.rsplit('\\\\')[-1]\n    file_name = ''.join([char for char in file_name if char.isprintable()])\n    if file_name in {'', '.', '..'}:\n        return None\n    return file_name",
    "docstring": "Sanitize the filename of an upload. Remove all possible path separators, even though that might remove more than actually required by the target system. Filenames that could potentially cause problems (current/parent dir) are also discarded. It should be noted that this function could still return a \"filepath\" like \"C:some_file.txt\" which is handled later on by the storage layer. So while this function does sanitize filenames to some extent, the resulting filename should still be considered as untrusted user input.",
    "type": "method",
    "file_path": "django\\django\\http\\multipartparser.py",
    "ast_data": "FunctionDef name:sanitize_file_name arg:self arg:file_name arguments arg arg Assign Call Assign Call Assign Call Assign Call Call If Compare Return return:no Return return:yes"
  },
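A standalone sketch of the same sanitization steps (the real method lives on Django's multipart parser); the example input is illustrative:

```python
import html

def sanitize_file_name(file_name):
    file_name = html.unescape(file_name)
    file_name = file_name.rsplit('/')[-1]   # strip Unix path components
    file_name = file_name.rsplit('\\')[-1]  # strip Windows path components
    file_name = ''.join(c for c in file_name if c.isprintable())
    return None if file_name in {'', '.', '..'} else file_name

print(sanitize_file_name('../../etc/passwd'))  # 'passwd'
```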
  {
    "library": "pytorch",
    "name": "compare_graphs",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef compare_graphs(left: Graph, right: Graph) -> bool:\n    matcher = SubgraphMatcher(left, match_output=True, match_placeholder=True)\n    matches = matcher.match(right)\n    return len(matches) > 0",
    "docstring": "Return True if two graphs are identical, i.e they - have the same number of outputs in the same order - have the same number of inputs in the same order - have the same set of nodes, and identical connectivity",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\utils\\common.py",
    "ast_data": "FunctionDef name:compare_graphs arg:left arg:right arguments arg arg Assign Call Assign Call Return return:yes Compare Call Call"
  },
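A minimal sketch comparing two symbolically traced graphs of the same function; `f` is illustrative:

```python
import torch
from torch.fx import symbolic_trace
from torch.fx.passes.utils.common import compare_graphs

def f(x):
    return x + 1

g1 = symbolic_trace(f).graph
g2 = symbolic_trace(f).graph
print(compare_graphs(g1, g2))  # True for structurally identical graphs
```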
  {
    "library": "authlib",
    "name": "TokenAuth",
    "source_code": "class TokenAuth:\n    DEFAULT_TOKEN_TYPE = 'bearer'\n    SIGN_METHODS = {'bearer': add_bearer_token}\n\n    def __init__(self, token, token_placement='header', client=None):\n        self.token = OAuth2Token.from_dict(token)\n        self.token_placement = token_placement\n        self.client = client\n        self.hooks = set()\n\n    def set_token(self, token):\n        self.token = OAuth2Token.from_dict(token)\n\n    def prepare(self, uri, headers, body):\n        token_type = self.token.get('token_type', self.DEFAULT_TOKEN_TYPE)\n        sign = self.SIGN_METHODS[token_type.lower()]\n        uri, headers, body = sign(self.token['access_token'], uri, headers, body, self.token_placement)\n        for hook in self.hooks:\n            uri, headers, body = hook(uri, headers, body)\n        return (uri, headers, body)\n\n    def __del__(self):\n        del self.client\n        del self.hooks",
    "docstring": "Attach token information to HTTP requests. :param token: A dict or OAuth2Token instance of an OAuth 2.0 token :param token_placement: The placement of the token, default is ``, available choices: * header (default) * body * uri",
    "type": "class",
    "file_path": "authlib\\authlib\\oauth2\\auth.py",
    "ast_data": "ClassDef name:TokenAuth Assign Assign FunctionDef name:__init__ arg:self arg:token arg:token_placement arg:client arguments arg arg arg arg Assign Call Assign Assign Assign Call FunctionDef name:set_token arg:self arg:token arguments arg arg Assign Call FunctionDef name:prepare arg:self arg:uri arg:headers arg:body arguments arg arg arg arg Assign Call Assign Call Assign Call For Assign Call Return return:yes FunctionDef name:__del__ arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "ifftshift",
    "source_code": "@tf_export('signal.ifftshift')\n@dispatch.add_dispatch_support\ndef ifftshift(x, axes=None, name=None):\n    with _ops.name_scope(name, 'ifftshift') as name:\n        x = _ops.convert_to_tensor(x)\n        if axes is None:\n            axes = tuple(range(x.shape.ndims))\n            shift = -(_array_ops.shape(x) // 2)\n        elif isinstance(axes, int):\n            shift = -(_array_ops.shape(x)[axes] // 2)\n        else:\n            rank = _array_ops.rank(x)\n            axes = _array_ops.where(_math_ops.less(axes, 0), axes + rank, axes)\n            shift = -(_array_ops.gather(_array_ops.shape(x), axes) // 2)\n        return manip_ops.roll(x, shift, axes, name)",
    "docstring": "The inverse of fftshift. Although identical for even-length x, the functions differ by one sample for odd-length x. @compatibility(numpy) Equivalent to numpy.fft.ifftshift. @end_compatibility For example: Args: x: , input tensor. axes: or shape Axes over which to calculate. Defaults to None, which shifts all axes. name: An optional name for the operation. Returns: A , The shifted tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\fft_ops.py",
    "ast_data": "FunctionDef name:ifftshift arg:x arg:axes arg:name arguments arg arg arg With Call Assign Call If Compare Assign Call Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes Call Call"
  },
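A quick sketch of the inverse relationship with `fftshift` on an odd-length input, where the two shifts differ by one sample:

```python
import tensorflow as tf

x = tf.constant([0., 1., 2., 3., 4.])
shifted = tf.signal.fftshift(x)
print(tf.signal.ifftshift(shifted).numpy())  # [0. 1. 2. 3. 4.]
```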
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "@property\ndef name(self):\n    return self._name",
    "docstring": "Name prepended to all ops created by this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ResourceClosure",
    "source_code": "class ResourceClosure(Closure):\n\n    def _init_remote_value(self):\n        return RemoteValueImpl(self, self._output_type_spec)\n\n    def build_output_remote_value(self):\n        if self._output_remote_value_ref is None:\n            ret = self._init_remote_value()\n            self._output_remote_value_ref = weakref.ref(ret)\n            return ret\n        else:\n            return self._output_remote_value_ref()",
    "docstring": "A closure that builds a resource on a worker. ResourceClosures keep a reference to the closure object, which is used to rerun the closure upon recovery to ensure workers have access to the resources they need.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "ClassDef name:ResourceClosure FunctionDef name:_init_remote_value arg:self arguments arg Return return:yes Call FunctionDef name:build_output_remote_value arg:self arguments arg If Compare Assign Call Assign Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "asarray",
    "source_code": "def asarray(obj: complex | NestedSequence[complex] | Array | SupportsBufferProtocol, /, *, dtype: DType | None=None, device: Device | None=None, copy: py_bool | None=None, **kwargs: object) -> Array:\n    _helpers._check_device(da, device)\n    if isinstance(obj, da.Array):\n        if dtype is not None and dtype != obj.dtype:\n            if copy is False:\n                raise ValueError('Unable to avoid copy when changing dtype')\n            obj = obj.astype(dtype)\n        return obj.copy() if copy else obj\n    if copy is False:\n        raise ValueError('Unable to avoid copy when converting a non-dask object to dask')\n    obj = np.array(obj, dtype=dtype, copy=True)\n    return da.from_array(obj)",
    "docstring": "Array API compatibility wrapper for asarray(). See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:asarray arguments arg arg arg arg arg Call If Call If BoolOp Compare Compare If Compare Raise Call Assign Call Return return:yes Call If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "private_bytes",
    "source_code": "@abc.abstractmethod\ndef private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:\n    pass",
    "docstring": "Returns the key serialized as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:private_bytes arg:self arg:encoding arg:format arg:encryption_algorithm arguments arg arg arg arg"
  },
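A hedged sketch of serializing a DSA private key with this interface; the key size and format choices are illustrative:

```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import dsa

key = dsa.generate_private_key(key_size=2048)
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.NoEncryption(),
)
print(pem.decode().splitlines()[0])  # -----BEGIN PRIVATE KEY-----
```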
  {
    "library": "tensorflow",
    "name": "_shared_object_disabled",
    "source_code": "def _shared_object_disabled():\n    return getattr(SHARED_OBJECT_DISABLED, 'disabled', False)",
    "docstring": "Get whether shared object handling is disabled in a threadsafe manner.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:_shared_object_disabled arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_unary_op_flops",
    "source_code": "def _unary_op_flops(graph, node, ops_per_element=1):\n    in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n    in_shape.assert_is_fully_defined()\n    return ops.OpStats('flops', in_shape.num_elements() * ops_per_element)",
    "docstring": "Common code which compute flops for unary operations.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_unary_op_flops arg:graph arg:node arg:ops_per_element arguments arg arg arg Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_check_field_name",
    "source_code": "def _check_field_name(self):\n    if self.name is None:\n        return []\n    if self.name.endswith('_'):\n        return [checks.Error('Field names must not end with an underscore.', obj=self, id='fields.E001')]\n    elif LOOKUP_SEP in self.name:\n        return [checks.Error('Field names must not contain \"%s\".' % LOOKUP_SEP, obj=self, id='fields.E002')]\n    elif self.name == 'pk':\n        return [checks.Error(\"'pk' is a reserved word that cannot be used as a field name.\", obj=self, id='fields.E003')]\n    else:\n        return []",
    "docstring": "Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain \"__\" and 3) is not \"pk\".",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:_check_field_name arg:self arguments arg If Compare Return return:no If Call Return return:yes Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, path: Union[str, os.PathLike], single_file_per_rank: bool=True, sync_files: bool=True, thread_count: int=1, per_thread_copy_ahead: int=10000000, overwrite: bool=True, _extensions: Optional[Sequence[StreamTransformExtension]]=None, serialization_format: SerializationFormat=SerializationFormat.TORCH_SAVE, **kwargs) -> None:\n    super().__init__(path, single_file_per_rank, sync_files, thread_count, per_thread_copy_ahead, overwrite=overwrite, _extensions=_extensions, serialization_format=serialization_format)\n    self.fs = FileSystem()\n    self.path = self.fs.init_path(path, **kwargs)",
    "docstring": "Initialize the writer pointing to . Args: path: directory where the checkpoint will be written to. single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True. sync_files : force files to be synced to permanent storage. Default to True. thread_count: Number of IO threads to use to write. Default to 1. per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb. overwrite: Whether to allow overwriting existing checkpoints. Defaults to True. _extensions: Extensions to apply to output streams (EXPERIMENTAL) N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_fsspec_filesystem.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:single_file_per_rank arg:sync_files arg:thread_count arg:per_thread_copy_ahead arg:overwrite arg:_extensions arg:serialization_format arguments arg arg arg arg arg arg arg arg arg arg Call Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_plot_ice_lines",
    "source_code": "def _plot_ice_lines(self, preds, feature_values, n_ice_to_plot, ax, pd_plot_idx, n_total_lines_by_plot, individual_line_kw):\n    rng = check_random_state(self.random_state)\n    ice_lines_idx = rng.choice(preds.shape[0], n_ice_to_plot, replace=False)\n    ice_lines_subsampled = preds[ice_lines_idx, :]\n    for ice_idx, ice in enumerate(ice_lines_subsampled):\n        line_idx = np.unravel_index(pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape)\n        self.lines_[line_idx] = ax.plot(feature_values, ice.ravel(), **individual_line_kw)[0]",
    "docstring": "Plot the ICE lines. Parameters ---------- preds : ndarray of shape (n_instances, n_grid_points) The predictions computed for all points of for a given feature for all samples in . feature_values : ndarray of shape (n_grid_points,) The feature values for which the predictions have been computed. n_ice_to_plot : int The number of ICE lines to plot. ax : Matplotlib axes The axis on which to plot the ICE lines. pd_plot_idx : int The sequential index of the plot. It will be unraveled to find the matching 2D position in the grid layout. n_total_lines_by_plot : int The total number of lines expected to be plot on the axis. individual_line_kw : dict Dict with keywords passed when plotting the ICE lines.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\inspection\\_plot\\partial_dependence.py",
    "ast_data": "FunctionDef name:_plot_ice_lines arg:self arg:preds arg:feature_values arg:n_ice_to_plot arg:ax arg:pd_plot_idx arg:n_total_lines_by_plot arg:individual_line_kw arguments arg arg arg arg arg arg arg arg Assign Call Assign Call Assign For Call Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "deserialize_symexpr",
    "source_code": "def deserialize_symexpr(self, code: str) -> Union[SymInt, SymFloat, SymBool]:\n    args = {str(e): SymInt(SymNode(e, self, int, int(val), fx_node=None)) for e, val in self.var_to_val.items()}\n    return eval(code, SYMPY_INTERP, args)",
    "docstring": "To be used by compile_fx to deserialize symexprs",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:deserialize_symexpr arg:self arg:code arguments arg arg Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_gid",
    "source_code": "def get_gid(self):\n    return self._gid",
    "docstring": "Return the group id.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\artist.py",
    "ast_data": "FunctionDef name:get_gid arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_shape_tensor",
    "source_code": "def _shape_tensor(shape):\n    if isinstance(shape, (tuple, list)) and (not shape):\n        dtype = dtypes.int64\n    else:\n        dtype = None\n    return ops.convert_to_tensor(shape, dtype=dtype, name='shape')",
    "docstring": "Convert to an int32 or int64 tensor, defaulting to int64 if empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:_shape_tensor arg:shape arguments arg If BoolOp Call Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LowerCholeskyTransform",
    "source_code": "class LowerCholeskyTransform(Transform):\n    domain = constraints.independent(constraints.real, 2)\n    codomain = constraints.lower_cholesky\n\n    def __eq__(self, other):\n        return isinstance(other, LowerCholeskyTransform)\n\n    def _call(self, x):\n        return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed()\n\n    def _inverse(self, y):\n        return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed()",
    "docstring": "Transform from unconstrained matrices to lower-triangular matrices with nonnegative diagonal entries. This is useful for parameterizing positive definite matrices in terms of their Cholesky factorization.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:LowerCholeskyTransform Assign Call Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Return return:yes Call Call Call Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "MonitoredHeaderMap",
    "source_code": "class MonitoredHeaderMap(_httputil.HeaderMap):\n\n    def transform_key(self, key):\n        self.accessed_headers.add(key)\n        return super(MonitoredHeaderMap, self).transform_key(key)\n\n    def __init__(self):\n        self.accessed_headers = set()\n        super(MonitoredHeaderMap, self).__init__()",
    "docstring": "An access-tracked HTTP header mapping.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "ClassDef name:MonitoredHeaderMap FunctionDef name:transform_key arg:self arg:key arguments arg arg Call Return return:yes Call Call FunctionDef name:__init__ arg:self arguments arg Assign Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__get__",
    "source_code": "def __get__(self, obj, klass=None):\n    if klass is None:\n        klass = type(obj)\n    return self.fget.__get__(obj, klass)()",
    "docstring": "Return property value.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_helper.py",
    "ast_data": "FunctionDef name:__get__ arg:self arg:obj arg:klass arguments arg arg arg If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "receive_data_chunk",
    "source_code": "def receive_data_chunk(self, raw_data, start):\n    if self.activated:\n        self.file.write(raw_data)\n    else:\n        return raw_data",
    "docstring": "Add the data to the BytesIO file.",
    "type": "method",
    "file_path": "django\\django\\core\\files\\uploadhandler.py",
    "ast_data": "FunctionDef name:receive_data_chunk arg:self arg:raw_data arg:start arguments arg arg arg If Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_legacy_structure",
    "source_code": "def convert_legacy_structure(output_types, output_shapes, output_classes):\n    flat_types = nest.flatten(output_types)\n    flat_shapes = nest.flatten(output_shapes)\n    flat_classes = nest.flatten(output_classes)\n    flat_ret = []\n    for flat_type, flat_shape, flat_class in zip(flat_types, flat_shapes, flat_classes):\n        if isinstance(flat_class, type_spec.TypeSpec):\n            flat_ret.append(flat_class)\n        elif issubclass(flat_class, sparse_tensor.SparseTensor):\n            flat_ret.append(sparse_tensor.SparseTensorSpec(flat_shape, flat_type))\n        elif issubclass(flat_class, tensor_lib.Tensor):\n            flat_ret.append(tensor_lib.TensorSpec(flat_shape, flat_type))\n        elif issubclass(flat_class, tensor_array_ops.TensorArray):\n            flat_ret.append(tensor_array_ops.TensorArraySpec(flat_shape[2:], flat_type, dynamic_size=tensor_shape.dimension_value(flat_shape[0]), infer_shape=tensor_shape.dimension_value(flat_shape[1])))\n        else:\n            raise TypeError('Could not build a structure for output class {}. Make sure any component class in `output_classes` inherits from one of the following classes: `tf.TypeSpec`, `tf.sparse.SparseTensor`, `tf.Tensor`, `tf.TensorArray`.'.format(flat_class.__name__))\n    return nest.pack_sequence_as(output_classes, flat_ret)",
    "docstring": "Returns a that represents the given legacy structure. This method provides a way to convert from the existing and structure-related properties to a object. A \"legacy\" structure is represented by the , , and properties. TODO(b/110122868): Remove this function once is used throughout . Args: output_types: A nested structure of objects corresponding to each component of a structured value. output_shapes: A nested structure of objects corresponding to each component a structured value. output_classes: A nested structure of Python objects corresponding to each component of a structured value. Returns: A . Raises: TypeError: If a structure cannot be built from the arguments, because one of the component classes in is not supported.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py",
    "ast_data": "FunctionDef name:convert_legacy_structure arg:output_types arg:output_shapes arg:output_classes arguments arg arg arg Assign Call Assign Call Assign Call Assign For Call If Call Call If Call Call Call If Call Call Call If Call Call Call Call Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "zeros_and_scatter",
    "source_code": "@torch.library.custom_op('flex_lib::zeros_and_scatter', mutates_args=())\ndef zeros_and_scatter(shape: list[int], indices: list[Tensor], vals: Tensor) -> Tensor:\n    grad = torch.zeros(shape, device=vals.device, dtype=vals.dtype)\n    return torch.ops.aten.index_put(grad, indices, vals, accumulate=True)",
    "docstring": "Custom Op so that we can register a custom lowering for the new_output + scatter in the backwards pass",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\_trace_wrapped_higher_order_op.py",
    "ast_data": "FunctionDef name:zeros_and_scatter arg:shape arg:indices arg:vals arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "DevicePlacementSpec",
    "source_code": "@dataclass\nclass DevicePlacementSpec(PlacementSpec):\n    device: torch.distributed._remote_device\n\n    def __post_init__(self):\n        if not isinstance(self.device, torch.distributed._remote_device):\n            self.device = torch.distributed._remote_device(self.device)",
    "docstring": "Associates placement of an entity with a single device. Args: device(:class:): The device to place the entity on.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "ClassDef name:DevicePlacementSpec FunctionDef name:__post_init__ arg:self arguments arg If Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "l1_loss",
    "source_code": "@elementwise_type_promotion_wrapper(type_promoting_args=('input', 'target'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT)\ndef l1_loss(input: TensorLikeType, target: TensorLikeType, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> TensorLikeType:\n    if size_average is not None or reduce is not None:\n        reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce)\n    _check_reduction_value(reduction)\n    loss = torch.abs(input - target)\n    return _apply_loss_reduction(loss, reduction)",
    "docstring": "Reference implementation of torch.nn.functional.l1_loss",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py",
    "ast_data": "FunctionDef name:l1_loss arg:input arg:target arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg If BoolOp Compare Compare Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__tensor_flatten__",
    "source_code": "def __tensor_flatten__(self):\n    return (['_local_tensor'], (self._spec, self.requires_grad))",
    "docstring": "protocol to inform how to flatten a DTensor to local tensor for PT2 tracing",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py",
    "ast_data": "FunctionDef name:__tensor_flatten__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "buffers",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef buffers(self) -> Iterator[torch.Tensor]:\n    for _, buf in self.named_buffers():\n        yield buf",
    "docstring": "Returns an iterator over original module buffers.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\exported_program.py",
    "ast_data": "FunctionDef name:buffers arg:self arguments arg For Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_longitude_grid",
    "source_code": "def set_longitude_grid(self, degrees):\n    grid = np.arange(-180 + degrees, 180, degrees)\n    self.xaxis.set_major_locator(FixedLocator(np.deg2rad(grid)))\n    self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))",
    "docstring": "Set the number of degrees between each longitude grid.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py",
    "ast_data": "FunctionDef name:set_longitude_grid arg:self arg:degrees arguments arg arg Assign Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, logdir, options=None):\n    self._logdir = logdir\n    self._options = options",
    "docstring": "Creates a context manager object for profiler API. Args: logdir: profile data will save to this directory. options: An optional can be provided to fine tune the profiler's behavior.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_v2.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:logdir arg:options arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "_get_output_tensor",
    "source_code": "def _get_output_tensor(self, op_type, tensor, checked_tensor, is_v1_graph_mode):\n    if is_v1_graph_mode:\n        if op_type == b'Placeholder':\n            self._placeholder_to_debug_tensor[tensor] = checked_tensor\n            return tensor\n        else:\n            return checked_tensor\n    else:\n        return tensor",
    "docstring": "Determine what tensor to output from callback. Args: op_type: Type of the op that outputs the original symbolic tensor, as . tensor: The original output symbolic tensor. checked_tensor: The debugger-instrumented, numerics-checking tensor. is_v1_graph_mode: Whether the debugged proggram is running under V1 graph mode. Returns: A symbolic tensor to be returned by the dumping op_callback.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\check_numerics_callback.py",
    "ast_data": "FunctionDef name:_get_output_tensor arg:self arg:op_type arg:tensor arg:checked_tensor arg:is_v1_graph_mode arguments arg arg arg arg arg If If Compare Assign Return return:yes Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "maybe_rechunk",
    "source_code": "def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None:\n    if not isinstance(series.dtype, pd.ArrowDtype):\n        return None\n    chunked_array = series.array._pa_array\n    if len(chunked_array.chunks) == 1:\n        return None\n    if not allow_copy:\n        raise RuntimeError('Found multi-chunk pyarrow array, but `allow_copy` is False. Please rechunk the array before calling this function, or set `allow_copy=True`.')\n    arr = chunked_array.combine_chunks()\n    return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index)",
    "docstring": "Rechunk a multi-chunk pyarrow array into a single-chunk array, if necessary. - Returns if the input series is not backed by a multi-chunk pyarrow array (and so doesn't need rechunking) - Returns a single-chunk-backed-Series if the input is backed by a multi-chunk pyarrow array and is . - Raises a if is and input is a based by a multi-chunk pyarrow array.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\interchange\\utils.py",
    "ast_data": "FunctionDef name:maybe_rechunk arg:series arguments arg arg If Call Return return:no Assign If Compare Call Return return:no If Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "update_names",
    "source_code": "def update_names(tensor, names, rename_map, inplace):\n    has_names = len(names) > 0\n    has_rename_pairs = bool(rename_map)\n    if has_names and has_rename_pairs:\n        raise RuntimeError(f'{namer_api_name(inplace)}: This function takes either positional args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename dims.')\n    if not has_names and (not has_rename_pairs):\n        return update_names_with_list(tensor, names, inplace)\n    if has_names:\n        return update_names_with_list(tensor, names, inplace)\n    return update_names_with_mapping(tensor, rename_map, inplace)",
    "docstring": "There are two usages: tensor.rename(*names) returns a view on tensor with named dims . must be of length ; otherwise, if '...' is in , then it is expanded greedily to be equal to the corresponding names from . For example, tensor.rename(**rename_map) returns a view on tensor that has rename dims as specified in the mapping . For example, Finally, tensor.rename has an in-place version called tensor.rename_.",
    "type": "function",
    "file_path": "pytorch\\torch\\_namedtensor_internals.py",
    "ast_data": "FunctionDef name:update_names arg:tensor arg:names arg:rename_map arg:inplace arguments arg arg arg arg Assign Compare Call Assign Call If BoolOp Raise Call Call Call Call If BoolOp Return return:yes Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_model_ready",
    "source_code": "def _model_ready(self, sess: session.Session) -> Tuple[bool, Optional[str]]:\n    return _ready(self._ready_op, sess, 'Model not ready')",
    "docstring": "Checks if the model is ready or not. Args: sess: A . Returns: A tuple (is_ready, msg), where is_ready is True if ready and False otherwise, and msg is if the model is ready, a with the reason why it is not ready otherwise.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_manager.py",
    "ast_data": "FunctionDef name:_model_ready arg:self arg:sess arguments arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "response_delete",
    "source_code": "def response_delete(self, request, obj_display, obj_id):\n    if IS_POPUP_VAR in request.POST:\n        popup_response_data = json.dumps({'action': 'delete', 'value': str(obj_id)})\n        return TemplateResponse(request, self.popup_response_template or ['admin/%s/%s/popup_response.html' % (self.opts.app_label, self.opts.model_name), 'admin/%s/popup_response.html' % self.opts.app_label, 'admin/popup_response.html'], {'popup_response_data': popup_response_data})\n    self.message_user(request, _('The %(name)s “%(obj)s” was deleted successfully.') % {'name': self.opts.verbose_name, 'obj': obj_display}, messages.SUCCESS)\n    if self.has_change_permission(request, None):\n        post_url = reverse('admin:%s_%s_changelist' % (self.opts.app_label, self.opts.model_name), current_app=self.admin_site.name)\n        preserved_filters = self.get_preserved_filters(request)\n        post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': self.opts}, post_url)\n    else:\n        post_url = reverse('admin:index', current_app=self.admin_site.name)\n    return HttpResponseRedirect(post_url)",
    "docstring": "Determine the HttpResponse for the delete_view stage.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:response_delete arg:self arg:request arg:obj_display arg:obj_id arguments arg arg arg arg If Compare Assign Call Call Return return:yes Call BoolOp Call Call If Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "min",
    "source_code": "def min(self, *, skipna: bool=True, **kwargs):\n    nv.validate_minmax_axis(kwargs.get('axis', 0))\n    nv.validate_min((), kwargs)\n    self.check_for_ordered('min')\n    if not len(self._codes):\n        return self.dtype.na_value\n    good = self._codes != -1\n    if not good.all():\n        if skipna and good.any():\n            pointer = self._codes[good].min()\n        else:\n            return np.nan\n    else:\n        pointer = self._codes.min()\n    return self._wrap_reduction_result(None, pointer)",
    "docstring": "The minimum value of the object. Only ordered have a minimum! Raises ------ TypeError If the is not . Returns ------- min : the minimum of this , NA value if empty",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:min arg:self arguments arg arg arg Call Call Call Call If Call Return return:yes Assign Compare If Call If BoolOp Call Assign Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "num_accumulated",
    "source_code": "def num_accumulated(self, name=None):\n    if name is None:\n        name = '%s_NumAccumulated' % self._name\n    return gen_data_flow_ops.accumulator_num_accumulated(self._accumulator_ref, name=name)",
    "docstring": "Number of gradients that have currently been aggregated in accumulator. Args: name: Optional name for the operation. Returns: Number of accumulated gradients currently in accumulator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:num_accumulated arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "clone",
    "source_code": "def clone(self) -> 'PinholeCamera':\n    height: Tensor = self.height.clone()\n    width: Tensor = self.width.clone()\n    intrinsics: Tensor = self.intrinsics.clone()\n    extrinsics: Tensor = self.extrinsics.clone()\n    return PinholeCamera(intrinsics, extrinsics, height, width)",
    "docstring": "Return a deep copy of the current object instance.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:clone arg:self arguments arg Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_reduction_meta",
    "source_code": "def _reduction_meta(inp, dims, *, output_dtype=None):\n    assert isinstance(inp, TensorLike)\n    if output_dtype is None:\n        output_dtype = inp.dtype\n    output_shape = utils.compute_reduction_output_shape(inp.shape, dims)\n    return TensorMeta(shape=output_shape, strides=utils.make_contiguous_strides_for(output_shape), dtype=output_dtype, device=inp.device)",
    "docstring": "Meta function for single output reduction operations Stride logic is incorrect",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\__init__.py",
    "ast_data": "FunctionDef name:_reduction_meta arg:inp arg:dims arguments arg arg arg Call If Compare Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "input_spec",
    "source_code": "@property\ndef input_spec(self):\n    return self._input_spec",
    "docstring": "instance(s) describing the input format for this layer. When you create a layer subclass, you can set to enable the layer to run input compatibility checks when it is called. Consider a layer: it can only be called on a single input tensor of rank 4. As such, you can set, in : Now, if you try to call the layer on an input that isn't rank 4 (for instance, an input of shape , it will raise a nicely-formatted error: Input checks that can be specified via include: - Structure (e.g. a single input, a list of 2 inputs, etc) - Shape - Rank (ndim) - Dtype For more information, see . Returns: A instance, or nested structure thereof.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:input_spec arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_input_names",
    "source_code": "def get_input_names(self):\n    return self._input_names",
    "docstring": "Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. For dictionary case, we return a sorted list of keys.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_input_names arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "def apply(self, func, *args, **kwargs) -> Series:\n    return super().apply(func, *args, **kwargs)",
    "docstring": "Apply function `gotchas.udf-mutationapplyapplyapplyapplyapplya transform `) when compared to the input. >>> g2.apply(lambda x: x.max() - x.min()) a 1 b 0 dtype: int64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\generic.py",
    "ast_data": "FunctionDef name:apply arg:self arg:func arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "normalize",
    "source_code": "def normalize(self, clone=False):\n    if clone:\n        clone = self.clone()\n        capi.geos_normalize(clone.ptr)\n        return clone\n    capi.geos_normalize(self.ptr)",
    "docstring": "Convert this Geometry to normal form (or canonical form). If the keyword is set, then the geometry is not modified and a normalized clone of the geometry is returned instead.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:normalize arg:self arg:clone arguments arg arg If Assign Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "rename",
    "source_code": "def rename(self, name, *, inplace: bool=False) -> Self | None:\n    return self.set_names([name], inplace=inplace)",
    "docstring": "Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if `names`.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:rename arg:self arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "StringFromGeom",
    "source_code": "class StringFromGeom(GEOSFuncFactory):\n    argtypes = [GEOM_PTR]\n    restype = geos_char_p\n    errcheck = staticmethod(check_string)",
    "docstring": "Argument is a Geometry, return type is a string.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\geom.py",
    "ast_data": "ClassDef name:StringFromGeom Assign Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_signature_def_fn",
    "source_code": "@abc.abstractmethod\ndef _get_signature_def_fn(self):\n    pass",
    "docstring": "Returns a function that produces a SignatureDef given desired outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:_get_signature_def_fn arg:self arguments arg"
  },
  {
    "library": "pandas",
    "name": "_unbox",
    "source_code": "@final\ndef _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray:\n    if lib.is_scalar(other):\n        other = self._unbox_scalar(other)\n    else:\n        self._check_compatible_with(other)\n        other = other._ndarray\n    return other",
    "docstring": "Unbox either a scalar with _unbox_scalar or an instance of our own type.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_unbox arg:self arg:other arguments arg arg If Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, log_interval: float=5, data_collect_interval: float=1, is_debug_mode: bool=False, pynvml_enabled: bool=False, amdsmi_enabled: bool=False) -> None:\n    self._log_interval = log_interval\n    self._data_collect_interval = data_collect_interval\n    self._metadata = UtilizationMetadata(level='metadata', usage_collect_interval=self._data_collect_interval, data_model_version=getDataModelVersion(), job_id=_job_id, job_name=_job_name, workflow_id=_workflow_run_id, workflow_name=_workflow_name, start_at=getTsNow())\n    self._has_pynvml = pynvml_enabled\n    self._has_amdsmi = amdsmi_enabled\n    self._gpu_handles: list[Any] = []\n    self._gpu_lib_detected: str = ''\n    self._num_of_cpus = 0\n    self._debug_mode = is_debug_mode\n    self._initial_gpu_handler()\n    self.shared_resource = SharedResource()\n    self.exit_event = threading.Event()",
    "docstring": "log_interval: Time interval in seconds for collecting usage data; default is 5 seconds. is_debug_mode: Useful if you're testing on a local machine and want to see the output in a pretty format with more information.",
    "type": "method",
    "file_path": "pytorch\\tools\\stats\\monitor.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:log_interval arg:data_collect_interval arg:is_debug_mode arg:pynvml_enabled arg:amdsmi_enabled arguments arg arg arg arg arg arg Assign Assign Assign Call Call Call Assign Assign Assign Assign Call Assign Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "_make_edges_3d",
    "source_code": "def _make_edges_3d(n_x, n_y, n_z=1):\n    vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))\n    edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))\n    edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))\n    edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))\n    edges = np.hstack((edges_deep, edges_right, edges_down))\n    return edges",
    "docstring": "Returns a list of edges for a 3D image. Parameters ---------- n_x : int The size of the grid in the x direction. n_y : int The size of the grid in the y direction. n_z : integer, default=1 The size of the grid in the z direction, defaults to 1",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\image.py",
    "ast_data": "FunctionDef name:_make_edges_3d arg:n_x arg:n_y arg:n_z arguments arg arg arg Assign Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_sym_solve",
    "source_code": "def _sym_solve(Dinv, A, r1, r2, solve):\n    r = r2 + A.dot(Dinv * r1)\n    v = solve(r)\n    u = Dinv * (A.T.dot(v) - r1)\n    return (u, v)",
    "docstring": "An implementation of [4] equation 8.31 and 8.32 References ---------- .. [4] Andersen, Erling D., and Knud D. Andersen. \"The MOSEK interior point optimizer for linear programming: an implementation of the homogeneous algorithm.\" High performance optimization. Springer US, 2000. 197-232.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_linprog_ip.py",
    "ast_data": "FunctionDef name:_sym_solve arg:Dinv arg:A arg:r1 arg:r2 arg:solve arguments arg arg arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "add_votes",
    "source_code": "def add_votes(self, votes):\n    self.votes.update(votes)",
    "docstring": "Add single vote per item to self.votes. Parameter can be any iterable.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_votes arg:self arg:votes arguments arg arg Call"
  },
  {
    "library": "kornia",
    "name": "Prompts",
    "source_code": "@dataclass\nclass Prompts:\n    points: Optional[tuple[Tensor, Tensor]] = None\n    boxes: Optional[Tensor] = None\n    masks: Optional[Tensor] = None\n\n    def __post_init__(self) -> None:\n        if isinstance(self.keypoints, Tensor) and isinstance(self.boxes, Tensor):\n            KORNIA_CHECK(self.keypoints.shape[0] == self.boxes.shape[0], 'The prompts should have the same batch size!')\n\n    @property\n    def keypoints(self) -> Optional[Tensor]:\n        return self.points[0] if isinstance(self.points, tuple) else None\n\n    @property\n    def keypoints_labels(self) -> Optional[Tensor]:\n        return self.points[1] if isinstance(self.points, tuple) else None",
    "docstring": "Encapsulate the prompts inputs for a Model. Args: points: A tuple with the keypoints (coordinates x, y) and their respective labels. Shape :math: for the keypoints, and :math: boxes: Batched box inputs, with shape :math:. Expected to be into xyxy format. masks: Batched mask prompts to the model with shape :math:",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\structures.py",
    "ast_data": "ClassDef name:Prompts FunctionDef name:__post_init__ arg:self arguments arg If BoolOp Call Call Call Compare FunctionDef name:keypoints arg:self arguments arg Return return:yes Call FunctionDef name:keypoints_labels arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_normalize_input",
    "source_code": "@staticmethod\ndef _normalize_input(x: torch.Tensor, eps: float=1e-06) -> torch.Tensor:\n    if not is_mps_tensor_safe(x):\n        sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)\n    else:\n        mp = torch.mean(x, dim=(-3, -2, -1), keepdim=True)\n        sp = torch.std(x, dim=(-3, -2, -1), keepdim=True)\n    return (x - mp.detach()) / (sp.detach() + eps)",
    "docstring": "Normalize the input by batch.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\hardnet.py",
    "ast_data": "FunctionDef name:_normalize_input arg:x arg:eps arguments arg arg If Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_paginate_orphans",
    "source_code": "def get_paginate_orphans(self):\n    return self.paginate_orphans",
    "docstring": "Return the maximum number of orphans extend the last page by when paginating.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_paginate_orphans arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "SphinxTransform",
    "source_code": "class SphinxTransform(Transform):\n\n    @property\n    def app(self) -> Sphinx:\n        return self.env.app\n\n    @property\n    def env(self) -> BuildEnvironment:\n        return self.document.settings.env\n\n    @property\n    def config(self) -> Config:\n        return self.env.config",
    "docstring": "A base class of Transforms. Compared with ``, this class improves accessibility to Sphinx APIs.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\transforms\\__init__.py",
    "ast_data": "ClassDef name:SphinxTransform FunctionDef name:app arg:self arguments arg Return return:yes FunctionDef name:env arg:self arguments arg Return return:yes FunctionDef name:config arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "transform_atomic_function",
    "source_code": "def transform_atomic_function(rt: runtime_client.Runtime, f: function_lib.AtomicFunction, transform_fn: Union[_FunctionDefTransformerType, list[_FunctionDefTransformerType]], mlir_pipeline: Union[str, list[str]]) -> function_lib.AtomicFunction:\n    transform_fns = transform_fn if isinstance(transform_fn, list) else [transform_fn]\n    mlir_pipelines = mlir_pipeline if isinstance(mlir_pipeline, list) else [mlir_pipeline]\n    for mlir_pipeline in mlir_pipelines:\n        rt.TransformFunction(f.cached_definition.signature.name, mlir_pipeline)\n    fndef = rt.GetFunctionProto(f.cached_definition.signature.name)\n    for transform_fn in transform_fns:\n        transform_fn(fndef)\n    rt.CreateFunction(fndef)\n    graph = ops.get_default_graph()\n    with graph.as_default():\n        func_graph = function_def_lib.function_def_to_graph(fndef, structured_input_signature=f.graph.structured_input_signature, structured_outputs=f.graph.structured_outputs, propagate_device_spec=True)\n    atomic = function_lib.from_func_graph(fndef.signature.name, func_graph, fndef.attr, overwrite=True)\n    return atomic",
    "docstring": "Applies transforms on an AtomicFunction.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\transform\\transform.py",
    "ast_data": "FunctionDef name:transform_atomic_function arg:rt arg:f arg:transform_fn arg:mlir_pipeline arguments arg arg arg arg Assign Call Assign Call For Call Assign Call For Call Call Assign Call With Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "reindex",
    "source_code": "def reindex(self, target, method: ReindexMethod | None=None, level=None, limit: int | None=None, tolerance: float | None=None) -> tuple[Index, npt.NDArray[np.intp] | None]:\n    preserve_names = not hasattr(target, 'name')\n    if is_iterator(target):\n        target = list(target)\n    if not isinstance(target, Index) and len(target) == 0:\n        if level is not None and self._is_multi:\n            idx = self.levels[level]\n        else:\n            idx = self\n        target = idx[:0]\n    else:\n        target = ensure_index(target)\n    if level is not None and (isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex)):\n        if method is not None:\n            raise TypeError('Fill method not supported if level passed')\n        target, indexer, _ = self._join_level(target, level, how='right', keep_order=not self._is_multi)\n    elif self.equals(target):\n        indexer = None\n    elif self._index_as_unique:\n        indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance)\n    elif self._is_multi:\n        raise ValueError('cannot handle a non-unique multi-index!')\n    elif not self.is_unique:\n        raise ValueError('cannot reindex on an axis with duplicate labels')\n    else:\n        indexer, _ = self.get_indexer_non_unique(target)\n    target = self._wrap_reindex_result(target, indexer, preserve_names)\n    return (target, indexer)",
    "docstring": "Create index with target's values. Parameters ---------- target : an iterable An iterable containing the values to be used for creating the new index. method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``abs(index[indexer] - target) >> idx = pd.Index([\"car\", \"bike\", \"train\", \"tractor\"]) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex([\"car\", \"bike\"]) (Index(['car', 'bike'], dtype='object'), array([0, 1]))",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:reindex arg:self arg:target arg:method arg:level arg:limit arg:tolerance arguments arg arg arg arg arg arg Assign Call If Call Assign Call If BoolOp Call Compare Call If BoolOp Compare Assign Assign Assign Assign Call If BoolOp Compare BoolOp Call Call If Compare Raise Call Assign Call If Call Assign If Assign Call If Raise Call If Raise Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "minimum_position",
    "source_code": "def minimum_position(input, labels=None, index=None):\n    dims = np.array(np.asarray(input).shape)\n    dim_prod = np.cumprod([1] + list(dims[:0:-1]))[::-1]\n    result = _select(input, labels, index, find_min_positions=True)[0]\n    if np.isscalar(result):\n        return tuple(result // dim_prod % dims)\n    return [tuple(v) for v in result.reshape(-1, 1) // dim_prod % dims]",
    "docstring": "Find the positions of the minimums of the values of an array at labels. Parameters ---------- input : array_like Array_like of values. labels : array_like, optional An array of integers marking different regions over which the position of the minimum value of is to be computed. must have the same shape as . If is not specified, the location of the first minimum over the whole array is returned. The argument only works when is specified. index : array_like, optional A list of region labels that are taken into account for finding the location of the minima. If is None, the `labelsindexlabelsinputlabelsindexindexlabelsinputlabelsindex`: >>> label, pos = ndimage.label(a) >>> ndimage.minimum_position(a, label, index=np.arange(1, pos+1)) [(2, 0)] >>> label, pos = ndimage.label(b) >>> ndimage.minimum_position(b, label, index=np.arange(1, pos+1)) [(0, 0), (0, 3), (3, 1)]",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_measurements.py",
    "ast_data": "FunctionDef name:minimum_position arg:input arg:labels arg:index arguments arg arg arg Assign Call Call Assign Call Call Assign Call If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "run_fetches_info",
    "source_code": "@property\ndef run_fetches_info(self):\n    output = self._run_fetches_info\n    return output[0] if len(output) == 1 else output",
    "docstring": "Get a str representation of the fetches used in the Session.run() call. Returns: If the information is available from one call, a obtained from . If the information is available from multiple calls, a of from . If the information is not available, .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:run_fetches_info arg:self arguments arg Assign Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "unit_regular_asterisk",
    "source_code": "@classmethod\ndef unit_regular_asterisk(cls, numVertices):\n    return cls.unit_regular_star(numVertices, 0.0)",
    "docstring": "Return a :class: for a unit regular asterisk with the given numVertices and radius of 1.0, centered at (0, 0).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:unit_regular_asterisk arg:cls arg:numVertices arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "eager_learning_phase_scope",
    "source_code": "@tf_contextlib.contextmanager\ndef eager_learning_phase_scope(value):\n    global _GRAPH_LEARNING_PHASES\n    assert value in {0, 1}\n    assert ops.executing_eagerly_outside_functions()\n    global_learning_phase_was_set = global_learning_phase_is_set()\n    if global_learning_phase_was_set:\n        previous_value = learning_phase()\n    try:\n        _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value\n        yield\n    finally:\n        if global_learning_phase_was_set:\n            _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_value\n        else:\n            del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]",
    "docstring": "Internal scope that sets the learning phase in eager / tf.function only. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if is neither nor .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:eager_learning_phase_scope arg:value arguments arg Compare Call Assign Call If Assign Call Try Assign If Assign"
  },
  {
    "library": "tensorflow",
    "name": "_convert_to_ast",
    "source_code": "def _convert_to_ast(n):\n    if isinstance(n, str):\n        return gast.Name(id=n, ctx=None, annotation=None, type_comment=None)\n    if isinstance(n, qual_names.QN):\n        return n.ast()\n    if isinstance(n, list):\n        return [_convert_to_ast(e) for e in n]\n    if isinstance(n, tuple):\n        return tuple((_convert_to_ast(e) for e in n))\n    return n",
    "docstring": "Converts from a known data type to AST.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\templates.py",
    "ast_data": "FunctionDef name:_convert_to_ast arg:n arguments arg If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_graph_connected_component",
    "source_code": "def _graph_connected_component(graph, node_id):\n    n_node = graph.shape[0]\n    if sparse.issparse(graph):\n        graph = graph.tocsr()\n    connected_nodes = np.zeros(n_node, dtype=bool)\n    nodes_to_explore = np.zeros(n_node, dtype=bool)\n    nodes_to_explore[node_id] = True\n    for _ in range(n_node):\n        last_num_component = connected_nodes.sum()\n        np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)\n        if last_num_component >= connected_nodes.sum():\n            break\n        indices = np.where(nodes_to_explore)[0]\n        nodes_to_explore.fill(False)\n        for i in indices:\n            if sparse.issparse(graph):\n                neighbors = graph[[i], :].toarray().ravel()\n            else:\n                neighbors = graph[i]\n            np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)\n    return connected_nodes",
    "docstring": "Find the largest graph connected components that contains one given node. Parameters ---------- graph : array-like of shape (n_samples, n_samples) Adjacency matrix of the graph, non-zero weight means an edge between the nodes. node_id : int The index of the query node of the graph. Returns ------- connected_components_matrix : array-like of shape (n_samples,) An array of bool value indicating the indexes of the nodes belonging to the largest connected components of the given query node.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py",
    "ast_data": "FunctionDef name:_graph_connected_component arg:graph arg:node_id arguments arg arg Assign If Call Assign Call Assign Call Assign Call Assign For Call Assign Call Call If Compare Call Assign Call Call For If Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "register_hook",
    "source_code": "def register_hook(self, hook):\n    if has_torch_function_unary(self):\n        return handle_torch_function(Tensor.register_hook, (self,), self, hook)\n    if not self.requires_grad:\n        raise RuntimeError(\"cannot register a hook on a tensor that doesn't require gradient\")\n    if self._backward_hooks is None:\n        self._backward_hooks = OrderedDict()\n        if self.grad_fn is not None:\n            self.grad_fn._register_hook_dict(self)\n    from torch.utils.hooks import RemovableHandle\n    handle = RemovableHandle(self._backward_hooks)\n    self._backward_hooks[handle.id] = hook\n    return handle",
    "docstring": "Registers a backward hook. The hook will be called every time a gradient with respect to the Tensor is computed. The hook should have the following signature:: hook(grad) -> Tensor or None The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of :attr:. This function returns a handle with a method `backward-hooks-execution` for more information on how when this hook is executed, and how its execution is ordered relative to other hooks. Example:: >>> v = torch.tensor([0., 0., 0.], requires_grad=True) >>> h = v.register_hook(lambda grad: grad * 2) # double the gradient >>> v.backward(torch.tensor([1., 2., 3.])) >>> v.grad 2 4 6 [torch.FloatTensor of size (3,)] >>> h.remove() # removes the hook",
    "type": "method",
    "file_path": "pytorch\\torch\\_tensor.py",
    "ast_data": "FunctionDef name:register_hook arg:self arg:hook arguments arg arg If Call Return return:yes Call If Raise Call If Compare Assign Call If Compare Call Assign Call Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "shape_as",
    "source_code": "def shape_as(self, obj):\n    if self._zerod:\n        return None\n    return (obj * self._arr.ndim)(*self._arr.shape)",
    "docstring": "Return the shape tuple as an array of some other c-types type. For example: ``.",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\_internal.py",
    "ast_data": "FunctionDef name:shape_as arg:self arg:obj arguments arg arg If Return return:no Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "size",
    "source_code": "def size(self, name=None):\n    if name is None:\n        name = '%s_size' % self._name\n    return self._size_fn(shared_name=self._name, name=name, dtypes=self._dtypes, capacity=self._capacity, memory_limit=self._memory_limit)",
    "docstring": "Returns the number of elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py",
    "ast_data": "FunctionDef name:size arg:self arg:name arguments arg arg If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "dropout2d",
    "source_code": "def dropout2d(input: Tensor, p: float=0.5, training: bool=True, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(dropout2d, (input,), input, p=p, training=training, inplace=inplace)\n    if p < 0.0 or p > 1.0:\n        raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}')\n    inp_dim = input.dim()\n    if inp_dim not in (3, 4):\n        warn_msg = f'dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated and will result in an error in a future release. To retain the behavior and silence this warning, please use dropout instead. Note that dropout2d exists to provide channel-wise dropout on inputs with 2 spatial dimensions, a channel dimension, and an optional batch dimension (i.e. 3D or 4D inputs).'\n        warnings.warn(warn_msg)\n    if inp_dim == 3:\n        warnings.warn('dropout2d: Received a 3D input to dropout2d and assuming that channel-wise 1D dropout behavior is desired - input is interpreted as shape (N, C, L), where C is the channel dim. This behavior will change in a future release to interpret the input as one without a batch dimension, i.e. shape (C, H, W). To maintain the 1D channel-wise dropout behavior, please switch to using dropout1d instead.')\n    result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training)\n    return result",
    "docstring": "Randomly zero out entire channels (a channel is a 2D feature map). For example, the :math:-th channel of the :math:-th sample in the batched input is a 2D tensor :math: of the input tensor. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. See :class: for details. Args: p: probability of a channel to be zeroed. Default: 0.5 training: apply dropout if is ``",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:dropout2d arg:input arg:p arg:training arg:inplace arguments arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Raise Call Assign Call If Compare Assign Call If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_gather_constant_attrs",
    "source_code": "def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:\n    constants = ConstantAttrMap()\n    buffers_parameters = set(m.buffers())\n    buffers_parameters.update(m.parameters())\n\n    def inner(m: torch.nn.Module, prefix_atoms: list[str], constants):\n        for k, v in m.__dict__.items():\n            if isinstance(v, (torch.Tensor, torch.ScriptObject, FakeScriptObject)):\n                if v in buffers_parameters:\n                    continue\n                fqn = '.'.join(prefix_atoms + [k])\n                constants.add(v, fqn)\n        for k, v in m.named_children():\n            inner(v, prefix_atoms + [k], constants)\n    inner(m, [], constants)\n    return constants",
    "docstring": "Search the module hierarchy, gathering up all tensor and ScriptObject constants. Returns a dictionary mapping hash(value) to the name of the constant. We have to abuse here unfortunately, see: [ScriptObject hash].",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\non_strict_utils.py",
    "ast_data": "FunctionDef name:_gather_constant_attrs arg:m arguments arg Assign Call Assign Call Call Call Call FunctionDef name:inner arg:m arg:prefix_atoms arg:constants arguments arg arg arg For Call If Call If Compare Assign Call Call For Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "copy",
    "source_code": "def copy(self, name: Hashable | None=None, deep: bool=False) -> Self:\n    name = self._validate_names(name=name, deep=deep)[0]\n    if deep:\n        new_data = self._data.copy()\n        new_index = type(self)._simple_new(new_data, name=name)\n    else:\n        new_index = self._rename(name=name)\n    return new_index",
    "docstring": "Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False If True attempts to make a deep copy of the Index. Else makes a shallow copy. Returns ------- Index Index refer to new object which is a copy of this object. See Also -------- Index.delete: Make new Index with passed location(-s) deleted. Index.drop: Make new Index with passed list of labels deleted. Notes ----- In most cases, there should be no functional difference from using `` is passed it will attempt to deepcopy. Examples -------- >>> idx = pd.Index([\"a\", \"b\", \"c\"]) >>> new_idx = idx.copy() >>> idx is new_idx False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:copy arg:self arg:name arg:deep arguments arg arg arg Assign Call If Assign Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_tensor_to_type",
    "source_code": "def _tensor_to_type(self, output: Union[Tensor, List[Tensor]], output_type: str, is_batch: bool=False) -> Union[Tensor, List[Tensor], List['Image.Image']]:\n    if output_type == 'torch':\n        if is_batch and (not isinstance(output, Tensor)):\n            return stack(output)\n        elif is_batch and isinstance(output, Tensor):\n            return output\n        elif not is_batch and isinstance(output, Tensor):\n            return list(output)\n        elif not is_batch and (not isinstance(output, Tensor)):\n            return output\n        return output\n    elif output_type == 'pil':\n        out = [Image.fromarray((tensor_to_image(out_img) * 255).astype(np.uint8)) for out_img in output]\n        return list(out)\n    raise RuntimeError(f'Unsupported output type `{output_type}`.')",
    "docstring": "Convert the output tensor to the desired type. Args: output: The output tensor or list of tensors. output_type: The desired output type. Accepted values are \"torch\" and \"pil\". is_batch: If True, the output is expected to be a batch of tensors. Returns: The converted output tensor or list of tensors. Raises: RuntimeError: If the output type is not supported.",
    "type": "method",
    "file_path": "kornia\\kornia\\models\\base.py",
    "ast_data": "FunctionDef name:_tensor_to_type arg:self arg:output arg:output_type arg:is_batch arguments arg arg arg arg If Compare If BoolOp Call Return return:yes Call If BoolOp Call Return return:yes If BoolOp Call Return return:yes Call If BoolOp Call Return return:yes Return return:yes If Compare Assign Call Call Call Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_mark_prior_graph_output_as_aliased",
    "source_code": "def _mark_prior_graph_output_as_aliased(self, index: PathOutputIndex) -> None:\n    depth, output_index = index\n    node = list(self._path_from_root)[depth]\n    node.unaliased_in_all_paths[output_index] = False\n    x = self.path_weakrefs[depth][output_index]\n    assert x is not None\n    x.remove_extra_reference()",
    "docstring": "Remove a graph output from the unaliased, cached tensors in an ancestor node",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:_mark_prior_graph_output_as_aliased arg:self arg:index arguments arg arg Assign Assign Call Assign Assign Compare Call"
  },
  {
    "library": "pandas",
    "name": "cast_scalar_indexer",
    "source_code": "def cast_scalar_indexer(val):\n    if lib.is_float(val) and val.is_integer():\n        raise IndexError('Indexing with a float is no longer supported. Manually convert to an integer key instead.')\n    return val",
    "docstring": "Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:cast_scalar_indexer arg:val arguments arg If BoolOp Call Call Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit_transform",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit_transform(self, X, y=None):\n    self._check_params_vs_input(X)\n    embedding = self._fit(X)\n    self.embedding_ = embedding\n    return self.embedding_",
    "docstring": "Fit X into an embedded space and return that transformed output. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. If the method is 'barnes_hut' and the metric is 'precomputed', X may be a precomputed sparse graph. y : None Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Embedding of the training data in low-dimensional space.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py",
    "ast_data": "FunctionDef name:fit_transform arg:self arg:X arg:y arguments arg arg arg Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_format_memory",
    "source_code": "def _format_memory(nbytes):\n    KB = 1024\n    MB = 1024 * KB\n    GB = 1024 * MB\n    if abs(nbytes) >= GB:\n        return f'{nbytes * 1.0 / GB:.2f} Gb'\n    elif abs(nbytes) >= MB:\n        return f'{nbytes * 1.0 / MB:.2f} Mb'\n    elif abs(nbytes) >= KB:\n        return f'{nbytes * 1.0 / KB:.2f} Kb'\n    else:\n        return str(nbytes) + ' b'",
    "docstring": "Return a formatted memory size string.",
    "type": "function",
    "file_path": "pytorch\\torch\\autograd\\profiler_util.py",
    "ast_data": "FunctionDef name:_format_memory arg:nbytes arguments arg Assign Assign Assign If Compare Call Return return:yes If Compare Call Return return:yes If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "before_content",
    "source_code": "def before_content(self) -> None:\n    prefix = None\n    if self.names:\n        fullname, name_prefix = self.names[-1]\n        if self.allow_nesting:\n            prefix = fullname\n        elif name_prefix:\n            prefix = name_prefix.strip('.')\n    if prefix:\n        self.env.ref_context['py:class'] = prefix\n        if self.allow_nesting:\n            classes = self.env.ref_context.setdefault('py:classes', [])\n            classes.append(prefix)\n    if 'module' in self.options:\n        modules = self.env.ref_context.setdefault('py:modules', [])\n        modules.append(self.env.ref_context.get('py:module'))\n        self.env.ref_context['py:module'] = self.options['module']",
    "docstring": "Handle object nesting before content :py:class: represents Python language constructs. For constructs that are nestable, such as a Python classes, this method will build up a stack of the nesting hierarchy so that it can be later de-nested correctly, in :py:meth:. For constructs that aren't nestable, the stack is bypassed, and instead only the most recent object is tracked. This object prefix name will be removed with :py:meth:.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\python\\_object.py",
    "ast_data": "FunctionDef name:before_content arg:self arguments arg Assign If Assign If Assign If Assign Call If Assign If Assign Call Call If Compare Assign Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "send_mail",
    "source_code": "def send_mail(subject, message, from_email, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None, html_message=None):\n    connection = connection or get_connection(username=auth_user, password=auth_password, fail_silently=fail_silently)\n    mail = EmailMultiAlternatives(subject, message, from_email, recipient_list, connection=connection)\n    if html_message:\n        mail.attach_alternative(html_message, 'text/html')\n    return mail.send()",
    "docstring": "Easy wrapper for sending a single message to a recipient list. All members of the recipient list will see the other recipients in the 'To' field. If from_email is None, use the DEFAULT_FROM_EMAIL setting. If auth_user is None, use the EMAIL_HOST_USER setting. If auth_password is None, use the EMAIL_HOST_PASSWORD setting. Note: The API for this method is frozen. New code wanting to extend the functionality should use the EmailMessage class directly.",
    "type": "function",
    "file_path": "django\\django\\core\\mail\\__init__.py",
    "ast_data": "FunctionDef name:send_mail arg:subject arg:message arg:from_email arg:recipient_list arg:fail_silently arg:auth_user arg:auth_password arg:connection arg:html_message arguments arg arg arg arg arg arg arg arg arg Assign BoolOp Call Assign Call If Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "wrap_graph_module_for_node_meta_preservation",
    "source_code": "def wrap_graph_module_for_node_meta_preservation(graph_module: torch.fx.GraphModule) -> Callable:\n\n    def wrapped(*args):\n        with fx_traceback.preserve_node_meta():\n            return torch.fx.Interpreter(graph_module).run(*args)\n    return wrapped",
    "docstring": "Wrap a GraphModule with contexts to preserve node meta information, such as stacktrace info. This is typically useful before calling . Without this wrapper, the stacktrace information will be lost afterwards.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\_utils.py",
    "ast_data": "FunctionDef name:wrap_graph_module_for_node_meta_preservation arg:graph_module arguments arg FunctionDef name:wrapped arguments arg With Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ListDevices",
    "source_code": "def ListDevices(self):\n    if self._tf_cluster is None:\n        return []\n    return [device_properties_pb2.NamedDevice.FromString(device) for device in tf_cluster.TF_ListDevices(self._tf_cluster)]",
    "docstring": "Returns a list of available hardware devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\grappler\\cluster.py",
    "ast_data": "FunctionDef name:ListDevices arg:self arguments arg If Compare Return return:no Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_CalibrationAlgorithmBase",
    "source_code": "class _CalibrationAlgorithmBase(abc.ABC):\n\n    def __init__(self, statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions):\n        self._statistics = statistics\n        self._calib_opts = calib_opts\n\n    @abc.abstractmethod\n    def get_min_max_value(self) -> tuple[float, float]:\n        pass",
    "docstring": "Abstract base class for calibration algorithm.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py",
    "ast_data": "ClassDef name:_CalibrationAlgorithmBase FunctionDef name:__init__ arg:self arg:statistics arg:calib_opts arguments arg arg arg Assign Assign FunctionDef name:get_min_max_value arg:self arguments arg"
  },
  {
    "library": "numpy",
    "name": "output_def",
    "source_code": "def output_def(dlist, flist, header, file=sys.stdout):\n    for data_sym in dlist:\n        header = header + '\\t%s DATA\\n' % data_sym\n    header = header + '\\n'\n    for func_sym in flist:\n        header = header + '\\t%s\\n' % func_sym\n    file.write(header)",
    "docstring": "Outputs the final DEF file to a file defaulting to stdout. output_def(dlist, flist, header, file = sys.stdout)",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\lib2def.py",
    "ast_data": "FunctionDef name:output_def arg:dlist arg:flist arg:header arg:file arguments arg arg arg arg For Assign Assign For Assign Call"
  },
  {
    "library": "pytorch",
    "name": "MetadataIndex",
    "source_code": "@dataclass(frozen=True)\nclass MetadataIndex:\n    fqn: str\n    'Fully Qualified Name of the object'\n    offset: Optional[torch.Size] = None\n    \"If the object is a tensor, offset into the tensor we're looking for\"\n    index: Optional[int] = field(hash=False, compare=False, default=None)\n    '\\n    Index hint when searching for tensor chunk to speedup lookups (optional)\\n\\n    A common representation of a sharded tensor is as a list of chunks so to\\n    find the index in such a list you need to linear search it.\\n\\n    When constructing an instance of MetadataIndex that points to that list,\\n    one can provide the index as a hint and it will be probed first before\\n    the linear search and thus making it significantly faster.\\n    '\n\n    def __init__(self, fqn: str, offset: Optional[Sequence[int]]=None, index: Optional[int]=None):\n        object.__setattr__(self, 'fqn', fqn)\n        object.__setattr__(self, 'index', index)\n        if offset is not None:\n            object.__setattr__(self, 'offset', torch.Size(offset))",
    "docstring": "This class represents a lookup key for items in a state dict or Metadata.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\metadata.py",
    "ast_data": "ClassDef name:MetadataIndex Call FunctionDef name:__init__ arg:self arg:fqn arg:offset arg:index arguments arg arg arg arg Call Call If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "SaveableObject",
    "source_code": "class SaveableObject:\n\n    def __init__(self, op, specs, name):\n        self.op = op\n        self.specs = specs\n        self.name = name\n\n    @property\n    def device(self):\n        return self.specs[0].device\n\n    def restore(self, restored_tensors, restored_shapes):\n        raise ValueError('Calling an abstract method.')",
    "docstring": "Base class for saving and restoring saveable objects.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py",
    "ast_data": "ClassDef name:SaveableObject FunctionDef name:__init__ arg:self arg:op arg:specs arg:name arguments arg arg arg arg Assign Assign Assign FunctionDef name:device arg:self arguments arg Return return:yes FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "sh_jacobi",
    "source_code": "def sh_jacobi(n, p, q, monic=False):\n    if n < 0:\n        raise ValueError('n must be nonnegative.')\n\n    def wfunc(x):\n        return (1.0 - x) ** (p - q) * x ** (q - 1.0)\n    if n == 0:\n        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic, eval_func=np.ones_like)\n    n1 = n\n    x, w = roots_sh_jacobi(n1, p, q)\n    hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)\n    hn /= (2 * n + p) * _gam(2 * n + p) ** 2\n    kn = 1.0\n    pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic, eval_func=lambda x: _ufuncs.eval_sh_jacobi(n, p, q, x))\n    return pp",
    "docstring": "Shifted Jacobi polynomial. Defined by .. math:: G_n^{(p, q)}(x) = \\binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1), where :math: is the nth Jacobi polynomial. Parameters ---------- n : int Degree of the polynomial. p : float Parameter, must have :math:. q : float Parameter, must be greater than 0. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- G : orthopoly1d Shifted Jacobi polynomial. Notes ----- For fixed :math:, the polynomials :math: are orthogonal over :math: with weight function :math:.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_orthogonal.py",
    "ast_data": "FunctionDef name:sh_jacobi arg:n arg:p arg:q arg:monic arguments arg arg arg arg If Compare Raise Call FunctionDef name:wfunc arg:x arguments arg Return return:yes If Compare Return return:yes Call Assign Assign Call Assign Call Call Call Call Call Assign Assign Call arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "argmin",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef argmin(x, axis=-1):\n    return math_ops.argmin(x, axis)",
    "docstring": "Returns the index of the minimum value along an axis. Args: x: Tensor or variable. axis: axis along which to perform the reduction. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:argmin arg:x arg:axis arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "english_capitalize",
    "source_code": "def english_capitalize(s):\n    if s:\n        return english_upper(s[0]) + s[1:]\n    else:\n        return s",
    "docstring": "Apply English case rules to convert the first character of an ASCII string to upper case. This is an internal utility function to replace calls to str.capitalize() such that we can avoid changing behavior with changing locales. Parameters ---------- s : str Returns ------- capitalized : str Examples -------- >>> from numpy._core.numerictypes import english_capitalize >>> english_capitalize('int8') 'Int8' >>> english_capitalize('Int8') 'Int8' >>> english_capitalize('') ''",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\_string_helpers.py",
    "ast_data": "FunctionDef name:english_capitalize arg:s arguments arg If Return return:yes Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "set_vary_header",
    "source_code": "def set_vary_header(response, header_name):\n    varies = response.headers.get('Vary', '')\n    varies = [x.strip() for x in varies.split(',') if x.strip()]\n    if header_name not in varies:\n        varies.append(header_name)\n    response.headers['Vary'] = ', '.join(varies)",
    "docstring": "Add a Vary header to a response.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\__init__.py",
    "ast_data": "FunctionDef name:set_vary_header arg:response arg:header_name arguments arg arg Assign Call Assign Call Call Call If Compare Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "transform",
    "source_code": "def transform(self, values):\n    values = np.asanyarray(values)\n    ndim = values.ndim\n    values = values.reshape((-1, self.input_dims))\n    res = self.transform_affine(self.transform_non_affine(values))\n    if ndim == 0:\n        assert not np.ma.is_masked(res)\n        return res[0, 0]\n    if ndim == 1:\n        return res.reshape(-1)\n    elif ndim == 2:\n        return res\n    raise ValueError('Input values must have shape (N, {dims}) or ({dims},)'.format(dims=self.input_dims))",
    "docstring": "Apply this transformation on the given array of *values*. Parameters ---------- values : array-like The input values as an array of length :attr: or shape (N, :attr:). Returns ------- array The output values as an array of length :attr: or shape (N, :attr:), depending on the input.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform arg:self arg:values arguments arg arg Assign Call Assign Assign Call Assign Call Call If Compare Call Return return:yes If Compare Return return:yes Call If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "BadNominalFormatting",
    "source_code": "class BadNominalFormatting(ArffException):\n\n    def __init__(self, value):\n        super().__init__()\n        self.message = 'Nominal data value \"%s\" not properly quoted in line ' % value + '%d.'",
    "docstring": "Error raised when a nominal value with space is not properly quoted.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\externals\\_arff.py",
    "ast_data": "ClassDef name:BadNominalFormatting FunctionDef name:__init__ arg:self arg:value arguments arg arg Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "generate_return_type_declarations",
    "source_code": "def generate_return_type_declarations(overloads: Sequence[PythonSignatureNativeFunctionPair]) -> list[str]:\n    typenames: dict[str, str] = {}\n    declarations: list[str] = []\n    for overload in overloads:\n        fieldnames = structseq_fieldnames(overload.function.func.returns)\n        if not fieldnames:\n            continue\n        name = cpp.name(overload.function.func)\n        tn_key = gen_structseq_typename_key(overload.function)\n        typename = typenames.get(tn_key)\n        if typename is None:\n            typename = f'{name}NamedTuple{('' if not declarations else len(declarations))}'\n            typenames[tn_key] = typename\n            declarations.append(f'PyTypeObject* get_{name}_structseq();')\n    return declarations",
    "docstring": "Generate block of function declarations in to initialize and return named tuple for a native function.",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py",
    "ast_data": "FunctionDef name:generate_return_type_declarations arg:overloads arguments arg For Assign Call If Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, tgt_is_causal: Optional[bool]=None, memory_is_causal: bool=False) -> Tensor:\n    output = tgt\n    seq_len = _get_seq_len(tgt, self.layers[0].self_attn.batch_first)\n    tgt_is_causal = _detect_is_causal_mask(tgt_mask, tgt_is_causal, seq_len)\n    for mod in self.layers:\n        output = mod(output, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, tgt_is_causal=tgt_is_causal, memory_is_causal=memory_is_causal)\n    if self.norm is not None:\n        output = self.norm(output)\n    return output",
    "docstring": "Pass the inputs (and mask) through the decoder layer in turn. Args: tgt: the sequence to the decoder (required). memory: the sequence from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). tgt_is_causal: If specified, applies a causal mask as `~torch.nn.Transformer`.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\transformer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:tgt arg:memory arg:tgt_mask arg:memory_mask arg:tgt_key_padding_mask arg:memory_key_padding_mask arg:tgt_is_causal arg:memory_is_causal arguments arg arg arg arg arg arg arg arg arg Assign Assign Call Assign Call For Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "register_tensor_conversion_function_internal",
    "source_code": "def register_tensor_conversion_function_internal(base_type, conversion_func, priority=100):\n    base_types = base_type if isinstance(base_type, tuple) else (base_type,)\n    if any((not isinstance(x, type) for x in base_types)):\n        raise TypeError(f'Argument `base_type` must be a type or a tuple of types. Obtained: {base_type}')\n    del base_types\n    if not callable(conversion_func):\n        raise TypeError(f'Argument `conversion_func` must be callable. Received {conversion_func}.')\n    with _tensor_conversion_func_lock:\n        _tensor_conversion_func_registry[priority].append((base_type, conversion_func))\n        _tensor_conversion_func_cache.clear()",
    "docstring": "Internal version of register_tensor_conversion_function. See docstring of for details. The internal version of the function allows registering conversions for types in the _UNCONVERTIBLE_TYPES tuple. Args: base_type: The base type or tuple of base types for all objects that accepts. conversion_func: A function that converts instances of to . priority: Optional integer that indicates the priority for applying this conversion function. Conversion functions with smaller priority values run earlier than conversion functions with larger priority values. Defaults to 100. Raises: TypeError: If the arguments do not have the appropriate type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_conversion_registry.py",
    "ast_data": "FunctionDef name:register_tensor_conversion_function_internal arg:base_type arg:conversion_func arg:priority arguments arg arg arg Assign Call If Call Call Raise Call If Call Raise Call With Call Call"
  },
  {
    "library": "pandas",
    "name": "ArrowCTypes",
    "source_code": "class ArrowCTypes:\n    NULL = 'n'\n    BOOL = 'b'\n    INT8 = 'c'\n    UINT8 = 'C'\n    INT16 = 's'\n    UINT16 = 'S'\n    INT32 = 'i'\n    UINT32 = 'I'\n    INT64 = 'l'\n    UINT64 = 'L'\n    FLOAT16 = 'e'\n    FLOAT32 = 'f'\n    FLOAT64 = 'g'\n    STRING = 'u'\n    LARGE_STRING = 'U'\n    DATE32 = 'tdD'\n    DATE64 = 'tdm'\n    TIMESTAMP = 'ts{resolution}:{tz}'\n    TIME = 'tt{resolution}'",
    "docstring": "Enum for Apache Arrow C type format strings. The Arrow C data interface:",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\interchange\\utils.py",
    "ast_data": "ClassDef name:ArrowCTypes Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_prepare_font",
    "source_code": "def _prepare_font(self, font_prop):\n    font = get_font(_fontManager._find_fonts_by_props(font_prop))\n    font.clear()\n    size = font_prop.get_size_in_points()\n    font.set_size(size, self.dpi)\n    return font",
    "docstring": "Get the for *font_prop*, clear its buffer, and set its size.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py",
    "ast_data": "FunctionDef name:_prepare_font arg:self arg:font_prop arguments arg arg Assign Call Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_expand_dims",
    "source_code": "def _expand_dims(t, axis, num_axes=1):\n    if isinstance(num_axes, int):\n        for _ in range(num_axes):\n            t = array_ops.expand_dims(t, axis)\n    else:\n        shape = array_ops.shape(t)\n        ones = array_ops.fill(array_ops.reshape(num_axes, [1]), constant_op.constant(1, shape.dtype))\n        new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)\n        t = array_ops.reshape(t, new_shape)\n    return t",
    "docstring": "Similar to but supports insertion of multiple axes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_expand_dims arg:t arg:axis arg:num_axes arguments arg arg arg If Call For Call Assign Call Assign Call Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "DefaultStack",
    "source_code": "class DefaultStack(threading.local, Generic[T]):\n\n    def __init__(self):\n        super().__init__()\n        self._enforce_nesting = True\n        self.stack: list[T] = []\n\n    def get_default(self) -> Optional[T]:\n        return self.stack[-1] if self.stack else None\n\n    def reset(self) -> None:\n        self.stack = []\n\n    def is_cleared(self) -> bool:\n        return not self.stack\n\n    @property\n    def enforce_nesting(self) -> bool:\n        return self._enforce_nesting\n\n    @enforce_nesting.setter\n    def enforce_nesting(self, value: bool):\n        self._enforce_nesting = value\n\n    @tf_contextlib.contextmanager\n    def get_controller(self, default: T) -> Iterator[T]:\n        self.stack.append(default)\n        try:\n            yield default\n        finally:\n            if self.stack:\n                if self._enforce_nesting:\n                    if self.stack[-1] is not default:\n                        raise AssertionError('Nesting violated for default stack of %s objects' % type(default))\n                    self.stack.pop()\n                else:\n                    self.stack.remove(default)",
    "docstring": "A thread-local stack of objects for providing implicit defaults.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\stack.py",
    "ast_data": "ClassDef name:DefaultStack FunctionDef name:__init__ arg:self arguments arg Call Call Assign FunctionDef name:get_default arg:self arguments arg Return return:yes FunctionDef name:reset arg:self arguments arg Assign FunctionDef name:is_cleared arg:self arguments arg Return return:yes FunctionDef name:enforce_nesting arg:self arguments arg Return return:yes FunctionDef name:enforce_nesting arg:self arg:value arguments arg arg Assign FunctionDef name:get_controller arg:self arg:default arguments arg arg Call Try If If If Compare Raise Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "width",
    "source_code": "@property\ndef width(self) -> int:\n    width = 0\n    while self.table.cells[self.row, self.col + width] == self.cell_id:\n        width += 1\n    return width",
    "docstring": "Returns the cell width.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\latex.py",
    "ast_data": "FunctionDef name:width arg:self arguments arg Assign While Compare Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "rotate",
    "source_code": "def rotate(self, theta):\n    a = math.cos(theta)\n    b = math.sin(theta)\n    mtx = self._mtx\n    (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist()\n    mtx[0, 0] = a * xx - b * yx\n    mtx[0, 1] = a * xy - b * yy\n    mtx[0, 2] = a * x0 - b * y0\n    mtx[1, 0] = b * xx + a * yx\n    mtx[1, 1] = b * xy + a * yy\n    mtx[1, 2] = b * x0 + a * y0\n    self.invalidate()\n    return self",
    "docstring": "Add a rotation (in radians) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:rotate arg:self arg:theta arguments arg arg Assign Call Assign Call Assign Assign Call Assign Assign Assign Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_enable_thunkify",
    "source_code": "@contextmanager\ndef _enable_thunkify(tracer: _ProxyTracer, *, enable: bool=True) -> Generator[None, None, None]:\n    old = tracer.enable_thunkify\n    tracer.enable_thunkify = enable\n    try:\n        yield\n    finally:\n        tracer.enable_thunkify = old",
    "docstring": "Enable thunkification inside the context manager. Thunkification prevents SymNode computation from directly being traced into an FX graph; instead, the compute is only added to the graph if it is actually used. This helps us track SymNode compute when it is computed (since we need /something/ to put in the tracker) even if it is unlikely to be used.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:_enable_thunkify arg:tracer arguments arg arg Assign Assign Try Assign"
  },
  {
    "library": "scipy",
    "name": "_process_parameters",
    "source_code": "def _process_parameters(self, dim):\n    if dim is None or not np.isscalar(dim) or dim < 0 or (dim != int(dim)):\n        raise ValueError('Dimension of rotation must be specified,and must be a scalar nonnegative integer.')\n    return dim",
    "docstring": "Dimension N must be specified; it cannot be inferred.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_parameters arg:self arg:dim arguments arg arg If BoolOp Compare Call Compare Compare Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_replica_ctx_update",
    "source_code": "def _replica_ctx_update(self, var, fn, args=(), kwargs=None, group=True):\n    replica_context = get_replica_context()\n    if not replica_context:\n        raise ValueError('`StrategyExtended._replica_ctx_update` must be called in a replica context.')\n\n    def merge_fn(_, *merged_args, **merged_kwargs):\n        return self.update(var, fn, merged_args, merged_kwargs, group=group)\n    return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs)",
    "docstring": "Run with and to update .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:_replica_ctx_update arg:self arg:var arg:fn arg:args arg:kwargs arg:group arguments arg arg arg arg arg arg Assign Call If Raise Call FunctionDef name:merge_fn arg:_ arguments arg arg arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Ones",
    "source_code": "@tf_export('ones_initializer', v1=[])\nclass Ones(Initializer):\n\n    def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n        self._validate_kwargs(kwargs)\n        dtype = dtypes.as_dtype(dtype)\n        if not dtype.is_numpy_compatible or dtype == dtypes.string:\n            raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return array_ops.ones(shape, dtype)",
    "docstring": "Initializer that generates tensors initialized to 1. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Examples: >>> def make_variables(k, initializer): ... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)), ... tf.Variable(initializer(shape=[k, k], dtype=tf.float32))) >>> v1, v2 = make_variables(3, tf.ones_initializer()) >>> v1 >>> v2 >>> make_variables(4, tf.random_uniform_initializer(minval=-1., maxval=1.)) (, <tf.Variable...shape=(4, 4) ...",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:Ones FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Compare Raise Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "is_platform_windows",
    "source_code": "def is_platform_windows() -> bool:\n    return sys.platform in ['win32', 'cygwin']",
    "docstring": "Checking if the running platform is windows. Returns ------- bool True if the running platform is windows.",
    "type": "function",
    "file_path": "pandas\\pandas\\compat\\__init__.py",
    "ast_data": "FunctionDef name:is_platform_windows arguments Return return:yes Compare"
  },
  {
    "library": "scikit-learn",
    "name": "__str__",
    "source_code": "def __str__(self) -> str:\n    return cast(str, self.value)",
    "docstring": "Pretty-print parameterized test names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_backends.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_lerp",
    "source_code": "def _lerp(a, b, t, out=None):\n    diff_b_a = subtract(b, a)\n    lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out))\n    subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, casting='unsafe', dtype=type(lerp_interpolation.dtype))\n    if lerp_interpolation.ndim == 0 and out is None:\n        lerp_interpolation = lerp_interpolation[()]\n    return lerp_interpolation",
    "docstring": "Compute the linear interpolation weighted by gamma on each point of two same shape array. a : array_like Left bound. b : array_like Right bound. t : array_like The interpolation weight. out : array_like Output array.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_function_base_impl.py",
    "ast_data": "FunctionDef name:_lerp arg:a arg:b arg:t arg:out arguments arg arg arg arg Assign Call Assign Call Call Call Compare Call If BoolOp Compare Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_default_kwargs",
    "source_code": "def _get_default_kwargs(f: Callable) -> 'OrderedDict[str, Any]':\n    kwargs = {}\n    for name, param in signature(f).parameters.items():\n        if param.default is not param.empty:\n            kwargs[name] = param.default\n        elif param.kind is param.VAR_POSITIONAL:\n            kwargs[name] = ()\n        elif param.kind is param.VAR_KEYWORD:\n            kwargs[name] = {}\n    return OrderedDict(kwargs)",
    "docstring": "Get all default keyword arguments from function signature Example:: >> def f(self, a, b=9): pass >> _get_default_kwargs(f) {\"b\": 9}",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_get_default_kwargs arg:f arguments arg Assign For Call Call If Compare Assign If Compare Assign If Compare Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "ChannelsOrder",
    "source_code": "class ChannelsOrder(Enum):\n    CHANNELS_FIRST = 0\n    CHANNELS_LAST = 1",
    "docstring": "Enum that represents the channels order of an image.",
    "type": "class",
    "file_path": "kornia\\kornia\\image\\base.py",
    "ast_data": "ClassDef name:ChannelsOrder Assign Assign"
  },
  {
    "library": "scipy",
    "name": "to_ss",
    "source_code": "def to_ss(self):\n    return copy.deepcopy(self)",
    "docstring": "Return a copy of the current system. Returns ------- sys : instance of The current system (copy)",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_ss arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "BoundingBoxDataFormat",
    "source_code": "class BoundingBoxDataFormat(Enum):\n    XYWH = 0\n    XYXY = 1\n    CXCYWH = 2\n    CENTER_XYWH = 2",
    "docstring": "Enum class that maps bounding box data format.",
    "type": "class",
    "file_path": "kornia\\kornia\\models\\detection\\base.py",
    "ast_data": "ClassDef name:BoundingBoxDataFormat Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "stop_on_exception",
    "source_code": "def stop_on_exception(self):\n    return self._coord.stop_on_exception()",
    "docstring": "Context handler to stop the supervisor when an exception is raised. See . Returns: A context handler.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:stop_on_exception arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_accumulate",
    "source_code": "def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> StringArray:\n    if name == 'cumprod':\n        msg = f\"operation '{name}' not supported for dtype '{self.dtype}'\"\n        raise TypeError(msg)\n    tail: np.ndarray | None = None\n    na_mask: np.ndarray | None = None\n    ndarray = self._ndarray\n    np_func = {'cumsum': np.cumsum, 'cummin': np.minimum.accumulate, 'cummax': np.maximum.accumulate}[name]\n    if self._hasna:\n        na_mask = cast('npt.NDArray[np.bool_]', isna(ndarray))\n        if np.all(na_mask):\n            return type(self)(ndarray)\n        if skipna:\n            if name == 'cumsum':\n                ndarray = np.where(na_mask, '', ndarray)\n            else:\n                ndarray = ndarray.copy()\n                missing.pad_or_backfill_inplace(ndarray, method='pad', axis=0)\n                missing.pad_or_backfill_inplace(ndarray, method='backfill', axis=0)\n        else:\n            idx = np.argmax(na_mask)\n            tail = np.empty(len(ndarray) - idx, dtype='object')\n            tail[:] = self.dtype.na_value\n            ndarray = ndarray[:idx]\n    np_result = np_func(ndarray)\n    if tail is not None:\n        np_result = np.hstack((np_result, tail))\n    elif na_mask is not None:\n        np_result = np.where(na_mask, self.dtype.na_value, np_result)\n    result = type(self)(np_result)\n    return result",
    "docstring": "Return an ExtensionArray performing an accumulation operation. The underlying data type might change. Parameters ---------- name : str Name of the function, supported values are: - cummin - cummax - cumsum - cumprod skipna : bool, default True If True, skip NA values. **kwargs Additional keyword arguments passed to the accumulation function. Currently, there is no supported kwarg. Returns ------- array Raises ------ NotImplementedError : subclass does not define accumulations",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:_accumulate arg:self arg:name arguments arg arg arg arg If Compare Assign Raise Call Assign Assign If Assign Call Call If Call Return return:yes Call Call If If Compare Assign Call Assign Call Call Call Assign Call Assign Call Call Assign Assign Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_aberth",
    "source_code": "def _aberth(f, fp, x0, tol=1e-15, maxiter=50):\n    N = len(x0)\n    x = array(x0, complex)\n    beta = np.empty_like(x0)\n    for iteration in range(maxiter):\n        alpha = -f(x) / fp(x)\n        for k in range(N):\n            beta[k] = np.sum(1 / (x[k] - x[k + 1:]))\n            beta[k] += np.sum(1 / (x[k] - x[:k]))\n        x += alpha / (1 + alpha * beta)\n        if not all(np.isfinite(x)):\n            raise RuntimeError('Root-finding calculation failed')\n        if all(abs(alpha) <= tol):\n            break\n    else:\n        raise Exception('Zeros failed to converge')\n    return x",
    "docstring": "Given a function , its first derivative , and a set of initial guesses , simultaneously find the roots of the polynomial using the Aberth-Ehrlich method. `f`. (This is not a complete implementation of Bini's algorithm.)",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_aberth arg:f arg:fp arg:x0 arg:tol arg:maxiter arguments arg arg arg arg arg Assign Call Assign Call Assign Call For Call Assign Call Call For Call Assign Call Call If Call Call Raise Call If Call Compare Call Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_RealParameter",
    "source_code": "class _RealParameter(_Parameter):\n\n    def validate(self, arr, parameter_values):\n        arr = np.asarray(arr)\n        valid_dtype = None\n        if arr.dtype == np.float64 or arr.dtype == np.float32:\n            pass\n        elif arr.dtype == np.int32 or arr.dtype == np.int64:\n            arr = np.asarray(arr, dtype=np.float64)\n        elif np.issubdtype(arr.dtype, np.floating):\n            pass\n        elif np.issubdtype(arr.dtype, np.integer):\n            arr = np.asarray(arr, dtype=np.float64)\n        else:\n            message = f'Parameter `{self.name}` must be of real dtype.'\n            raise TypeError(message)\n        valid = self.domain.contains(arr, parameter_values)\n        valid = valid & valid_dtype if valid_dtype is not None else valid\n        return (arr[()], arr.dtype, valid)",
    "docstring": "Represents a real-valued parameter. Implements the remaining methods of _Parameter for real parameters. All attributes are inherited.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py",
    "ast_data": "ClassDef name:_RealParameter FunctionDef name:validate arg:self arg:arr arg:parameter_values arguments arg arg arg Assign Call Assign If BoolOp Compare Compare If BoolOp Compare Compare Assign Call If Call If Call Assign Call Assign Raise Call Assign Call Assign Compare Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "partial_run_setup",
    "source_code": "def partial_run_setup(self, fetches, feeds=None):\n    raise NotImplementedError('partial_run_setup is not implemented for debug-wrapper sessions.')",
    "docstring": "Sets up the feeds and fetches for partial runs in the session.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:partial_run_setup arg:self arg:fetches arg:feeds arguments arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_xy2",
    "source_code": "def set_xy2(self, *args, **kwargs):\n    if self._slope is None:\n        params = _api.select_matching_signature([lambda self, x, y: locals(), lambda self, xy2: locals()], self, *args, **kwargs)\n        if 'x' in params:\n            _api.warn_deprecated('3.10', message='Passing x and y separately to AxLine.set_xy2 is deprecated since %(since)s; pass them as a single tuple instead.')\n            xy2 = (params['x'], params['y'])\n        else:\n            xy2 = params['xy2']\n        self._xy2 = xy2\n    else:\n        raise ValueError(\"Cannot set an 'xy2' value while 'slope' is set; they differ but their functionalities overlap\")",
    "docstring": "Set the *xy2* value of the line. .. note:: You can only set *xy2* if the line was created using the *xy2* parameter. If the line was created using *slope*, please use . Parameters ---------- xy2 : tuple[float, float] Points for the line to pass through.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:set_xy2 arg:self arguments arg arg arg If Compare Assign Call arguments arg arg arg Call arguments arg arg Call If Compare Call Assign Assign Assign Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_UnwrapPreventer",
    "source_code": "class _UnwrapPreventer(object):\n    __slots__ = ['value']\n\n    def __init__(self, value):\n        self.value = value",
    "docstring": "Wrapper that DistributionStrategy will not unwrap. Typically, DistributionStrategy will unwrap values when going from a cross- replica context to a replica context via . This class is a wrapper that DistributionStrategy will not unwrap, so it can be used to prevent it from unwrapping a value. TODO(reedwm): Find/implement a better way of preventing values from being unwrapped by DistributionStrategy",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py",
    "ast_data": "ClassDef name:_UnwrapPreventer Assign FunctionDef name:__init__ arg:self arg:value arguments arg arg Assign"
  },
  {
    "library": "pandas",
    "name": "generate_value_label",
    "source_code": "def generate_value_label(self, byteorder: str) -> bytes:\n    encoding = self._encoding\n    bio = BytesIO()\n    null_byte = b'\\x00'\n    bio.write(struct.pack(byteorder + 'i', self.len))\n    labname = str(self.labname)[:32].encode(encoding)\n    lab_len = 32 if encoding not in ('utf-8', 'utf8') else 128\n    labname = _pad_bytes(labname, lab_len + 1)\n    bio.write(labname)\n    for i in range(3):\n        bio.write(struct.pack('c', null_byte))\n    bio.write(struct.pack(byteorder + 'i', self.n))\n    bio.write(struct.pack(byteorder + 'i', self.text_len))\n    for offset in self.off:\n        bio.write(struct.pack(byteorder + 'i', offset))\n    for value in self.val:\n        bio.write(struct.pack(byteorder + 'i', value))\n    for text in self.txt:\n        bio.write(text + null_byte)\n    return bio.getvalue()",
    "docstring": "Generate the binary representation of the value labels. Parameters ---------- byteorder : str Byte order of the output Returns ------- value_label : bytes Bytes containing the formatted value label",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\stata.py",
    "ast_data": "FunctionDef name:generate_value_label arg:self arg:byteorder arguments arg arg Assign Assign Call Assign Call Call Assign Call Call Assign Compare Assign Call Call For Call Call Call Call Call Call Call For Call Call For Call Call For Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell):\n    self._cell = cell",
    "docstring": "Creates a new SamplerCell. Args: cell: A c pointer of TFE_MonitoringSamplerCell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arguments arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "dim",
    "source_code": "@_onnx_symbolic('aten::dim')\ndef dim(g: jit_utils.GraphContext, self):\n    shape = g.op('Shape', self)\n    return g.op('Size', shape)",
    "docstring": "Implement the dim functionality available for a pytorch tensor in ONNX",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:dim arg:g arg:self arguments arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "issues_closed_since",
    "source_code": "def issues_closed_since(period=timedelta(days=365), project='matplotlib/matplotlib', pulls=False):\n    which = 'pulls' if pulls else 'issues'\n    if isinstance(period, timedelta):\n        since = round_hour(datetime.utcnow() - period)\n    else:\n        since = period\n    url = f'https://api.github.com/repos/{project}/{which}?state=closed&sort=updated&since={since.strftime(ISO8601)}&per_page={PER_PAGE}'\n    allclosed = get_paged_request(url, headers=make_auth_header())\n    filtered = (i for i in allclosed if _parse_datetime(i['closed_at']) > since)\n    if pulls:\n        filtered = (i for i in filtered if _parse_datetime(i['merged_at']) > since)\n        filtered = (i for i in filtered if i['base']['ref'] == 'main')\n    else:\n        filtered = (i for i in filtered if not is_pull_request(i))\n    return list(filtered)",
    "docstring": "Get all issues closed since a particular point in time. *period* can either be a datetime object, or a timedelta object. In the latter case, it is used as a time before the present.",
    "type": "function",
    "file_path": "matplotlib\\tools\\github_stats.py",
    "ast_data": "FunctionDef name:issues_closed_since arg:period arg:project arg:pulls arguments arg arg arg Call Assign If Call Assign Call Call Assign Assign Call Assign Call Call Assign Compare Call If Assign Compare Call Assign Compare Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "to_numpy_helper",
    "source_code": "def to_numpy_helper(value):\n    if is_fake(value):\n        return value\n    if isinstance(value, tnp.ndarray):\n        return to_numpy_helper(value.tensor)\n    elif isinstance(value, torch.Tensor):\n        return value.numpy(force=True)\n    elif isinstance(value, (tuple, list)):\n        return type(value)((to_numpy_helper(obj) for obj in value))\n    else:\n        return value",
    "docstring": "Convert tensor and tnp.ndarray to numpy.ndarray.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:to_numpy_helper arg:value arguments arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_set_func_list_attr",
    "source_code": "def _set_func_list_attr(self, attr_name, func_names) -> None:\n    funcs = [attr_value_pb2.NameAttrList(name=func_name) for func_name in func_names]\n    funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)\n    self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))",
    "docstring": "Private method used to set a list(function) attribute in the node_def.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_set_func_list_attr arg:self arg:attr_name arg:func_names arguments arg arg arg Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "multi_margin_loss",
    "source_code": "def multi_margin_loss(input: Tensor, target: Tensor, p: int=1, margin: float=1.0, weight: Optional[Tensor]=None, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target, weight):\n        return handle_torch_function(multi_margin_loss, (input, target, weight), input, target, p=p, margin=margin, weight=weight, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    if p != 1 and p != 2:\n        raise ValueError('only p == 1 and p == 2 supported')\n    if weight is not None:\n        if weight.dim() != 1:\n            raise ValueError('weight must be one-dimensional')\n    return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)",
    "docstring": "Compute the multi margin loss, with optional weighting. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. p (int, optional): Has a default value of 1. 1 and 2 are the only supported values. margin (float, optional): Margin for multi margin loss. Has a default value of 1. weight (Tensor, optional): Weights for each sample. Default: None. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. Returns: Tensor: Multi margin loss (optionally weighted).",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:multi_margin_loss arg:input arg:target arg:p arg:margin arg:weight arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call If BoolOp Compare Compare Raise Call If Compare If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "pygame",
    "name": "__copy__",
    "source_code": "def __copy__(self):\n    return self.__class__(self)",
    "docstring": "Clone the current Cursor object. You can do the same thing by doing Cursor(Cursor).",
    "type": "method",
    "file_path": "pygame\\src_py\\cursors.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_code",
    "source_code": "def get_code(fn: Callable[P, _T], *args: P.args, **kwargs: P.kwargs) -> list[str]:\n    from .graph import GraphLowering\n    source_codes: list[str] = []\n\n    def save_output_code(code: str) -> None:\n        source_codes.append(code)\n\n    def patched_compile_to_module(self: GraphLowering) -> Any:\n\n        class DummyModule:\n\n            def __init__(self) -> None:\n                pass\n\n            def call(self, *args: Any, **kwargs: Any) -> None:\n                pass\n        wrapper_code, kernel_code = self.codegen_with_cpp_wrapper() if self.cpp_wrapper else self.codegen()\n        nonlocal save_output_code\n        save_output_code(wrapper_code.value)\n        if kernel_code:\n            save_output_code(kernel_code.value)\n        return DummyModule()\n    with mock.patch.object(GraphLowering, 'compile_to_module', patched_compile_to_module), mock.patch.object(GraphLowering, 'save_output_code', save_output_code):\n        torch._dynamo.reset()\n        _ = fn(*args, **kwargs)\n    return source_codes",
    "docstring": "Get the inductor-generated code, but skip any actual compilation or running.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:get_code arg:fn arguments arg arg arg FunctionDef name:save_output_code arg:code arguments arg Call FunctionDef name:patched_compile_to_module arg:self arguments arg ClassDef name:DummyModule FunctionDef name:__init__ arg:self arguments arg FunctionDef name:call arg:self arguments arg arg arg Assign Call Call Call If Call Return return:yes Call With Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_canonical_name_meson",
    "source_code": "def get_canonical_name_meson(target, build_path):\n    assert len(target['filename']) == 1\n    shared_library_path = Path(target['filename'][0])\n    shared_library_relative_path = shared_library_path.relative_to(build_path.absolute())\n    rel_path = shared_library_relative_path.as_posix()\n    pattern = '\\\\.(cpython|cp\\\\d+)-.+'\n    return re.sub(pattern, '', str(rel_path))",
    "docstring": "Return a name based on generated shared library. The goal is to return a name that can be easily matched with the output from . Look at docstring to see what looks like.",
    "type": "function",
    "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py",
    "ast_data": "FunctionDef name:get_canonical_name_meson arg:target arg:build_path arguments arg arg Compare Call Assign Call Assign Call Call Assign Call Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "save",
    "source_code": "def save(self, destination: str | os.PathLike, *, include_initializers: bool=True, keep_initializers_as_inputs: bool=False, external_data: bool | None=None):\n    original_initializers = copy.copy(self.model.graph.initializers)\n    original_inputs = copy.copy(self.model.graph.inputs)\n    if not include_initializers:\n        self.model.graph.initializers.clear()\n    if keep_initializers_as_inputs:\n        self.model.graph.inputs.extend(original_initializers.values())\n    try:\n        if external_data or _count_initializer_size(self.model.graph) > _LARGE_MODEL_THRESHOLD:\n            onnxscript_apis.save_model_with_external_data(self.model, destination)\n        else:\n            ir.save(self.model, destination)\n    finally:\n        if not include_initializers:\n            self.model.graph.initializers.update(original_initializers)\n        if keep_initializers_as_inputs:\n            self.model.graph.inputs.clear()\n            self.model.graph.inputs.extend(original_inputs)",
    "docstring": "Save the ONNX model to the specified destination. When `True` is not a file path.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:save arg:self arg:destination arguments arg arg arg arg arg Assign Call Assign Call If Call If Call Call Try If BoolOp Compare Call Call Call If Call If Call Call"
  },
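A hedged usage sketch for `save` above, assuming a recent PyTorch where `torch.onnx.export(..., dynamo=True)` returns an `ONNXProgram`; the module and file names are made up:

```python
import torch

class Scale(torch.nn.Module):
    def forward(self, x):
        return x * 2.0

# dynamo=True returns an ONNXProgram whose .save() is the method shown above.
program = torch.onnx.export(Scale(), (torch.randn(2, 3),), dynamo=True)
program.save("scale.onnx")                          # initializers kept inline
program.save("scale_ext.onnx", external_data=True)  # force external weights
```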
  {
    "library": "pandas",
    "name": "items",
    "source_code": "def items(self) -> Iterator[tuple[str, list]]:\n    for g in self.groups():\n        yield (g._v_pathname, g)",
    "docstring": "iterate on key->group",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:items arg:self arguments arg For Call"
  },
  {
    "library": "tensorflow",
    "name": "TFLiteConverterMetrics",
    "source_code": "class TFLiteConverterMetrics(TFLiteMetrics):\n\n    def __init__(self) -> None:\n        super(TFLiteConverterMetrics, self).__init__()\n        session_id = uuid.uuid4().hex\n        self._metrics_exporter = metrics_wrapper.MetricsWrapper(session_id)\n        self._exported = False\n\n    def __del__(self):\n        if not self._exported:\n            self.export_metrics()\n\n    def set_export_required(self):\n        self._exported = False\n\n    def export_metrics(self):\n        self._metrics_exporter.ExportMetrics()\n        self._exported = True",
    "docstring": "Similar to TFLiteMetrics but specialized for converter. A unique session id will be created for each new TFLiteConverterMetrics.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\metrics\\metrics_nonportable.py",
    "ast_data": "ClassDef name:TFLiteConverterMetrics FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Assign Call Assign FunctionDef name:__del__ arg:self arguments arg If Call FunctionDef name:set_export_required arg:self arguments arg Assign FunctionDef name:export_metrics arg:self arguments arg Call Assign"
  },
  {
    "library": "pytorch",
    "name": "quantize_per_token",
    "source_code": "@impl(quantized_decomposed_lib, 'quantize_per_token', 'CompositeExplicitAutograd')\ndef quantize_per_token(input: torch.Tensor, scales: torch.Tensor, zero_points: torch.Tensor, quant_min: int, quant_max: int, dtype: torch.dtype):\n    _quant_min_max_bounds_check(quant_min, quant_max, dtype)\n    _per_token_quant_qparam_dim_check(input, scales, zero_points)\n    input = input.mul(1.0 / scales).add(zero_points).round().clamp(quant_min, quant_max).to(dtype)\n    return input",
    "docstring": "Per token quantization for the Tensor using the quantization parameters to map from floating point to quantized values. This means for a N dimension Tensor (M1, M2, ...Mn, N), we calculate scales/zero_points for each N elements and quantize every N elements with the same quantization parameter. The dimension for scales/zero_points will be (M1 * M2 ... * Mn) Args: input (torch.Tensor): original float32 or bfloat16 Tensor scales (float32 torch.Tensor): quantization parameter for per token affine quantization zero_points (int32 torch.Tensor): quantization parameter for per token affine quantization quant_min (int): minimum quantized value for output Tensor quant_max (int): maximum quantized value for output Tensor dtype (torch.dtype): requested dtype (e.g. torch.uint8) for output Tensor Returns: Tensor with requested dtype (e.g. torch.uint8), note the quantization parameters are not stored in the Tensor, we are storing them in function arguments instead",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py",
    "ast_data": "FunctionDef name:quantize_per_token arg:input arg:scales arg:zero_points arg:quant_min arg:quant_max arg:dtype arguments arg arg arg arg arg arg Call Call Assign Call Call Call Call Call Return return:yes Call"
  },
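The per-token arithmetic reduces to one scale/zero_point per row; a standalone sketch mirroring the kernel body above (the int8 range and max-abs scales are illustrative choices):

```python
import torch

x = torch.randn(2, 4)  # (M, N): one scale/zero_point per token (row)
scales = x.abs().amax(dim=1, keepdim=True) / 127.0
zero_points = torch.zeros(2, 1, dtype=torch.int32)

# Same chain of ops as the kernel body above.
q = (x.mul(1.0 / scales).add(zero_points)
       .round().clamp(-128, 127).to(torch.int8))
print(q.dtype, q.shape)  # torch.int8 torch.Size([2, 4])
```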
  {
    "library": "tensorflow",
    "name": "set_log_device_placement",
    "source_code": "@tf_export('debugging.set_log_device_placement')\ndef set_log_device_placement(enabled):\n    context().log_device_placement = enabled",
    "docstring": "Turns logging for device placement decisions on or off. Operations execute on a particular device, producing and consuming tensors on that device. This may change the performance of the operation or require TensorFlow to copy data to or from an accelerator, so knowing where operations execute is useful for debugging performance issues. For more advanced profiling, use the [TensorFlow profiler]( Device placement for operations is typically controlled by a scope, but there are exceptions, for example operations on a which follow the initial placement of the variable. Turning off soft device placement (with ) provides more explicit control. >>> tf.debugging.set_log_device_placement(True) >>> tf.ones([]) >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:GPU:0 >>> with tf.device(\"CPU\"): ... tf.ones([]) >>> # [...] op Fill in device /job:localhost/replica:0/task:0/device:CPU:0 >>> tf.debugging.set_log_device_placement(False) Turning on also logs the placement of ops inside when the function is called. Args: enabled: Whether to enabled device placement logging.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:set_log_device_placement arg:enabled arguments arg Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_array",
    "source_code": "def get_array(self):\n    return self._A",
    "docstring": "Return the array of values, that are mapped to colors. The base class does not make any assumptions on the dimensionality and shape of the array.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:get_array arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_set_post_op_offset",
    "source_code": "def _set_post_op_offset(self, spec: DTensorSpec, old_offset: int) -> None:\n    dtensor_shape = spec.shape\n    from torch.distributed.tensor._ops.utils import prod\n    numel = prod(dtensor_shape)\n    numel = (numel + 3) // 4 * 4\n    self.set_offset('parallel-rng', old_offset + numel)",
    "docstring": "Sets the RNG to a synchronized state after running the local random op. Every rank should set its RNG offset to where old_offset is the offset before calling i.e. the offset before running DTensor random ops. Args: spec (:class:): the spec of the DTensor object on which we post-process the offset for running random ops. Returns: None",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_random.py",
    "ast_data": "FunctionDef name:_set_post_op_offset arg:self arg:spec arg:old_offset arguments arg arg arg Assign Assign Call Assign Call"
  },
  {
    "library": "authlib",
    "name": "get_amr",
    "source_code": "def get_amr(self) -> list[str]:\n    return None",
    "docstring": "Get the \"amr\" (Authentication Method Reference) value of the authorization code object. Have a look at :rfc: to see the full list of registered amr. def get_amr(self) -> list[str]: return [\"pwd\", \"otp\"]",
    "type": "method",
    "file_path": "authlib\\authlib\\oidc\\core\\models.py",
    "ast_data": "FunctionDef name:get_amr arg:self arguments arg Return return:no"
  },
  {
    "library": "pytorch",
    "name": "_get_binary_ops_configs",
    "source_code": "def _get_binary_ops_configs() -> list[BackendPatternConfig]:\n    dtype_configs = [qnnpack_default_op_qint8_symmetric_dtype_config, executorch_weighted_op_int8_dtype_config]\n    num_tensor_args_to_observation_type_mapping = {0: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT, 1: ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT, 2: ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT}\n    binary_op_configs: list[BackendPatternConfig] = []\n    for op in [operator.add, torch.add, operator.sub, torch.sub, operator.mul, torch.mul]:\n        bop_patterns = [(op, torch.nn.ReLU), (op, torch.nn.functional.relu), (op, torch.relu), op]\n        binary_op_configs.extend((BackendPatternConfig(bop_pattern).set_dtype_configs(dtype_configs)._set_num_tensor_args_to_observation_type(num_tensor_args_to_observation_type_mapping) for bop_pattern in bop_patterns))\n    return binary_op_configs",
    "docstring": "Return all configs related to binary ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\executorch.py",
    "ast_data": "FunctionDef name:_get_binary_ops_configs arguments Assign Assign For Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "select",
    "source_code": "def select(self, attributes):\n    self._options['select'] = copy.copy(attributes)\n    return self",
    "docstring": "Select the attributes to display. See for supported attributes. Args: attributes: A list of attribute the profiler node has. Returns: self",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:select arg:self arg:attributes arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "AMGM",
    "source_code": "class AMGM(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([0.0] * self.N, [10.0] * self.N))\n        self.global_optimum = [[1, 1]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        f1 = sum(x)\n        f2 = prod(x)\n        f1 = f1 / self.N\n        f2 = f2 ** (1.0 / self.N)\n        f = (f1 - f2) ** 2\n        return f",
    "docstring": "AMGM objective function. The AMGM (Arithmetic Mean - Geometric Mean Equality) global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{AMGM}}(x) = \\left ( \\frac{1}{n} \\sum_{i=1}^{n} x_i - \\sqrt[n]{ \\prod_{i=1}^{n} x_i} \\right )^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO, retrieved 2015 TODO: eqn 7 in [1]_ has the wrong global minimum value.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py",
    "ast_data": "ClassDef name:AMGM Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Call Assign Assign Assign Return return:yes"
  },
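The objective is just the squared gap between the arithmetic and geometric means; a standalone re-implementation of `fun` above:

```python
import numpy as np

def amgm(x):
    x = np.asarray(x, dtype=float)
    f1 = x.mean()                      # arithmetic mean
    f2 = np.prod(x) ** (1.0 / x.size)  # geometric mean
    return (f1 - f2) ** 2

print(amgm([1.0, 1.0]))  # 0.0 at the global optimum
print(amgm([2.0, 8.0]))  # (5 - 4)**2 = 1.0
```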
  {
    "library": "matplotlib",
    "name": "_set_autoscale_on",
    "source_code": "def _set_autoscale_on(self, b):\n    if b is not None:\n        self._autoscale_on = b",
    "docstring": "Set whether this Axis is autoscaled when drawing or by . If b is None, then the value is not changed. Parameters ---------- b : bool",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_set_autoscale_on arg:self arg:b arguments arg arg If Compare Assign"
  },
  {
    "library": "tensorflow",
    "name": "_apply_dense",
    "source_code": "def _apply_dense(self, grad, var):\n    raise NotImplementedError()",
    "docstring": "Add ops to apply dense gradients to . Args: grad: A . var: A object. Returns: An .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_apply_dense arg:self arg:grad arg:var arguments arg arg arg Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "ThetaLocator",
    "source_code": "class ThetaLocator(mticker.Locator):\n\n    def __init__(self, base):\n        self.base = base\n        self.axis = self.base.axis = _AxisWrapper(self.base.axis)\n\n    def set_axis(self, axis):\n        self.axis = _AxisWrapper(axis)\n        self.base.set_axis(self.axis)\n\n    def __call__(self):\n        lim = self.axis.get_view_interval()\n        if _is_full_circle_deg(lim[0], lim[1]):\n            return np.deg2rad(min(lim)) + np.arange(8) * 2 * np.pi / 8\n        else:\n            return np.deg2rad(self.base())\n\n    def view_limits(self, vmin, vmax):\n        vmin, vmax = np.rad2deg((vmin, vmax))\n        return np.deg2rad(self.base.view_limits(vmin, vmax))",
    "docstring": "Used to locate theta ticks. This will work the same as the base locator except in the case that the view spans the entire circle. In such cases, the previously used default locations of every 45 degrees are returned.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:ThetaLocator FunctionDef name:__init__ arg:self arg:base arguments arg arg Assign Assign Call FunctionDef name:set_axis arg:self arg:axis arguments arg arg Assign Call Call FunctionDef name:__call__ arg:self arguments arg Assign Call If Call Return return:yes Call Call Call Return return:yes Call Call FunctionDef name:view_limits arg:self arg:vmin arg:vmax arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "sphinx",
    "name": "_get_documenter",
    "source_code": "def _get_documenter(obj: Any, parent: Any, *, registry: SphinxComponentRegistry) -> type[Documenter]:\n    from sphinx.ext.autodoc import DataDocumenter, ModuleDocumenter\n    if inspect.ismodule(obj):\n        return ModuleDocumenter\n    if parent is not None:\n        parent_doc_cls = _get_documenter(parent, None, registry=registry)\n    else:\n        parent_doc_cls = ModuleDocumenter\n    if hasattr(parent, '__name__'):\n        parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)\n    else:\n        parent_doc = parent_doc_cls(FakeDirective(), '')\n    classes = [cls for cls in registry.documenters.values() if cls.can_document_member(obj, '', False, parent_doc)]\n    if classes:\n        classes.sort(key=lambda cls: cls.priority)\n        return classes[-1]\n    else:\n        return DataDocumenter",
    "docstring": "Get an autodoc.Documenter class suitable for documenting the given object. *obj* is the Python object to be documented, and *parent* is an another Python object (e.g. a module or a class) to which *obj* belongs to.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\autosummary\\__init__.py",
    "ast_data": "FunctionDef name:_get_documenter arg:obj arg:parent arguments arg arg arg If Call Return return:yes If Compare Assign Call Assign If Call Assign Call Call Assign Call Call Assign Call Call If Call arguments arg Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_type",
    "source_code": "def get_type(type):\n    if isinstance(type, str):\n        return type\n    elif inspect.getmodule(type) == typing:\n        type_to_string = str(type)\n        return type_to_string.replace(type.__module__ + '.', '')\n    elif is_torch_native_class(type):\n        return type.__module__ + '.' + type.__name__\n    else:\n        return type.__name__",
    "docstring": "Convert the given type to a torchScript acceptable format.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_monkeytype_config.py",
    "ast_data": "FunctionDef name:get_type arg:type arguments arg If Call Return return:yes If Compare Call Assign Call Return return:yes Call If Call Return return:yes Return return:yes"
  },
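A short demo of the conversion rules described above; the import path is the private module from this entry, so treat it as an implementation detail:

```python
from typing import List

from torch.jit._monkeytype_config import get_type

print(get_type("int"))      # strings pass through unchanged
print(get_type(List[int]))  # 'List[int]' -- the 'typing.' prefix is dropped
```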
  {
    "library": "kornia",
    "name": "identity_matrix",
    "source_code": "def identity_matrix(self, input: Tensor) -> Tensor:\n    return eye_like(3, input)",
    "docstring": "Return 3x3 identity matrix.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\_2d\\base.py",
    "ast_data": "FunctionDef name:identity_matrix arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_quantile",
    "source_code": "def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self:\n    pa_dtype = self._pa_array.type\n    data = self._pa_array\n    if pa.types.is_temporal(pa_dtype):\n        nbits = pa_dtype.bit_width\n        if nbits == 32:\n            data = data.cast(pa.int32())\n        else:\n            data = data.cast(pa.int64())\n    result = pc.quantile(data, q=qs, interpolation=interpolation)\n    if pa.types.is_temporal(pa_dtype):\n        if pa.types.is_floating(result.type):\n            result = pc.floor(result)\n        nbits = pa_dtype.bit_width\n        if nbits == 32:\n            result = result.cast(pa.int32())\n        else:\n            result = result.cast(pa.int64())\n        result = result.cast(pa_dtype)\n    return type(self)(result)",
    "docstring": "Compute the quantiles of self for each quantile in . Parameters ---------- qs : np.ndarray[float64] interpolation: str Returns ------- same type as self",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_quantile arg:self arg:qs arg:interpolation arguments arg arg arg Assign Assign If Call Assign If Compare Assign Call Call Assign Call Call Assign Call If Call If Call Assign Call Assign If Compare Assign Call Call Assign Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "wrapped_intermediates",
    "source_code": "@property\ndef wrapped_intermediates(self):\n    return list(self._wrapped_intermediates.values())",
    "docstring": "The optional-wrapped intermediates captured from the forward graph.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:wrapped_intermediates arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__init__",
    "source_code": "def __init__(self, *system, **kwargs):\n    if isinstance(system[0], LinearTimeInvariant):\n        return\n    super().__init__(**kwargs)\n    self._zeros = None\n    self._poles = None\n    self._gain = None\n    self.zeros, self.poles, self.gain = system",
    "docstring": "Initialize the zeros, poles, gain system.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg arg If Call Return return:no Call Call Assign Assign Assign Assign"
  },
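This initializer backs `scipy.signal.ZerosPolesGain`; a minimal sketch of constructing one from a (zeros, poles, gain) triple:

```python
from scipy.signal import ZerosPolesGain

# zeros, poles, gain -> H(s) = 3 / ((s + 1)(s + 2))
sys = ZerosPolesGain([], [-1.0, -2.0], 3.0)
print(sys.zeros, sys.poles, sys.gain)
```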
  {
    "library": "django",
    "name": "equals_identical",
    "source_code": "def equals_identical(self, other):\n    if geos_version_tuple() < (3, 12):\n        raise GEOSException('GEOSGeometry.equals_identical() requires GEOS >= 3.12.0.')\n    return capi.geos_equalsidentical(self.ptr, other.ptr)",
    "docstring": "Return true if the two Geometries are point-wise equivalent.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:equals_identical arg:self arg:other arguments arg arg If Compare Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Schwefel20",
    "source_code": "class Schwefel20(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(abs(x))",
    "docstring": "Schwefel 20 objective function. This class defines the Schwefel 20 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel20}}(x) = \\sum_{i=1}^n \\lvert x_i \\rvert Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Jamil #122 is incorrect. There shouldn't be a leading minus sign.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Schwefel20 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "register_module_forward_pre_hook",
    "source_code": "def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHandle:\n    handle = RemovableHandle(_global_forward_pre_hooks)\n    _global_forward_pre_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a forward pre-hook common to all modules. .. warning :: This adds global state to the module and it is only intended for debugging/profiling purposes. The hook will be called every time before :func: is invoked. It should have the following signature:: hook(module, input) -> None or modified input The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the `torch.utils.hooks.RemovableHandle`",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:register_module_forward_pre_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
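A small sketch of registering a global forward pre-hook; `log_inputs` is made up for illustration:

```python
import torch
from torch.nn.modules.module import register_module_forward_pre_hook

def log_inputs(module, inputs):
    # Receives positional arguments only, per the docstring above.
    print(type(module).__name__, [tuple(t.shape) for t in inputs])

handle = register_module_forward_pre_hook(log_inputs)
torch.nn.Linear(3, 2)(torch.randn(1, 3))  # prints: Linear [(1, 3)]
handle.remove()
```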
  {
    "library": "pygame",
    "name": "resource_stream",
    "source_code": "def resource_stream(_package_of_requirement, _resource_name):\n    raise NotImplementedError",
    "docstring": "A stub for when we fail to import this function. Always raises a NotImplementedError when called.",
    "type": "function",
    "file_path": "pygame\\src_py\\pkgdata.py",
    "ast_data": "FunctionDef name:resource_stream arg:_package_of_requirement arg:_resource_name arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "max_memory_reserved",
    "source_code": "def max_memory_reserved(device: _device_t=None) -> int:\n    return memory_stats(device=device).get('reserved_bytes.all.peak', 0)",
    "docstring": "Return the maximum GPU memory managed by the caching allocator in bytes for a given device. By default, this returns the peak cached memory since the beginning of this program. :func: can be used to reset the starting point in tracking this metric. For example, these two functions can measure the peak cached memory amount of each iteration in a training loop. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `` (default).",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\memory.py",
    "ast_data": "FunctionDef name:max_memory_reserved arg:device arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "NoRestoreSaveable",
    "source_code": "class NoRestoreSaveable(saveable_object.SaveableObject):\n\n    def __init__(self, tensor, name, dtype=None, device=None):\n        spec = saveable_object.SaveSpec(tensor, '', name, dtype=dtype, device=device)\n        super(NoRestoreSaveable, self).__init__(tensor, [spec], name)\n\n    def restore(self, restored_tensors, restored_shapes):\n        return gen_control_flow_ops.no_op()",
    "docstring": "Embeds a tensor in a checkpoint with no restore ops.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "ClassDef name:NoRestoreSaveable FunctionDef name:__init__ arg:self arg:tensor arg:name arg:dtype arg:device arguments arg arg arg arg arg Assign Call Call Call FunctionDef name:restore arg:self arg:restored_tensors arg:restored_shapes arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "RawToRgb",
    "source_code": "class RawToRgb(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 1, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def __init__(self, cfa: CFA) -> None:\n        super().__init__()\n        self.cfa = cfa\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        return raw_to_rgb(image, cfa=self.cfa)",
    "docstring": "Module to convert a bayer raw image to RGB version of image. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: Example: >>> rawinput = torch.rand(2, 1, 4, 6) >>> rgb = RawToRgb(CFA.RG) >>> output = rgb(rawinput) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\raw.py",
    "ast_data": "ClassDef name:RawToRgb FunctionDef name:__init__ arg:self arg:cfa arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_rotate_method",
    "source_code": "def set_rotate_method(rotate_method: str) -> None:\n    if rotate_method == 'allgather':\n        _cp_options.rotate_method = _RotateMethod.ALL_GATHER\n    elif rotate_method == 'alltoall':\n        _cp_options.rotate_method = _RotateMethod.ALL_TO_ALL\n    else:\n        raise NotImplementedError(f'Context Parallel does not support using {rotate_method} for kv shards rotation')",
    "docstring": "Context Parallel SDPA requires the rotation of kv shards. Users can call this API to specify which rotation method to use. \"alltoall\" shuffles the kv shards using all-to-all collective. While \"allgather\" gathers the kv shards using all-gather collective after the first sub-SDPA computation. If this API has not been called, the default rotate method is \"allgather\". Args: rotate_method (str): the rotate method to use. Currently only supports \"allgather\" and \"alltoall\". If a different string other than these two is passed in, the function will raise an error. Returns: None",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "FunctionDef name:set_rotate_method arg:rotate_method arguments arg If Compare Assign If Compare Assign Raise Call"
  },
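Usage is a single call before running Context Parallel SDPA; note the module is private/experimental, so the import path may change:

```python
from torch.distributed.tensor.experimental._attention import set_rotate_method

# Select the all-to-all rotation; "allgather" is the default.
set_rotate_method("alltoall")
```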
  {
    "library": "pytorch",
    "name": "_DerivedConstraint",
    "source_code": "@dataclasses.dataclass\nclass _DerivedConstraint(_ConstraintTarget):\n    name: str\n    constraint_range: 'StrictMinMaxConstraint'\n    root: Union[_ConstraintTarget, _PhantomRoot]\n    fn: Callable\n\n    @property\n    def serializable_spec(self):\n        return {'t_id': self.t_id, 'dim': self.dim, 'min': self.constraint_range.vr.lower, 'max': self.constraint_range.vr.upper}",
    "docstring": "This represents a derived Dim, whose root is either a regular constraint target (which directly specifies the shape of some input dimension) or a phantom root (which does so indirectly). It can be thought of as a subclass of , except that it does not support , >= operations.",
    "type": "class",
    "file_path": "pytorch\\torch\\export\\dynamic_shapes.py",
    "ast_data": "ClassDef name:_DerivedConstraint FunctionDef name:serializable_spec arg:self arguments arg Return return:yes"
  },
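A hedged sketch of how a derived Dim arises in the public export API (the names are illustrative); arithmetic on a `Dim` produces the derived form this dataclass models:

```python
from torch.export import Dim

batch = Dim("batch", min=2, max=64)
# A univariate expression over a Dim yields a derived Dim, represented
# internally by _DerivedConstraint with `batch` as its root.
prev = batch - 1
```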
  {
    "library": "django",
    "name": "AddIndexConcurrently",
    "source_code": "class AddIndexConcurrently(NotInTransactionMixin, AddIndex):\n    atomic = False\n    category = OperationCategory.ADDITION\n\n    def describe(self):\n        return 'Concurrently create index %s on field(s) %s of model %s' % (self.index.name, ', '.join(self.index.fields), self.model_name)\n\n    def database_forwards(self, app_label, schema_editor, from_state, to_state):\n        self._ensure_not_in_transaction(schema_editor)\n        model = to_state.apps.get_model(app_label, self.model_name)\n        if self.allow_migrate_model(schema_editor.connection.alias, model):\n            schema_editor.add_index(model, self.index, concurrently=True)\n\n    def database_backwards(self, app_label, schema_editor, from_state, to_state):\n        self._ensure_not_in_transaction(schema_editor)\n        model = from_state.apps.get_model(app_label, self.model_name)\n        if self.allow_migrate_model(schema_editor.connection.alias, model):\n            schema_editor.remove_index(model, self.index, concurrently=True)",
    "docstring": "Create an index using PostgreSQL's CREATE INDEX CONCURRENTLY syntax.",
    "type": "class",
    "file_path": "django\\django\\contrib\\postgres\\operations.py",
    "ast_data": "ClassDef name:AddIndexConcurrently Assign Assign FunctionDef name:describe arg:self arguments arg Return return:yes Call FunctionDef name:database_forwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Call Assign Call If Call Call FunctionDef name:database_backwards arg:self arg:app_label arg:schema_editor arg:from_state arg:to_state arguments arg arg arg arg arg Call Assign Call If Call Call"
  },
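A sketch of using the operation in a hand-written migration; the app, model, and index names are hypothetical. `atomic = False` is required because CREATE INDEX CONCURRENTLY cannot run inside a transaction:

```python
from django.contrib.postgres.operations import AddIndexConcurrently
from django.db import migrations, models

class Migration(migrations.Migration):
    atomic = False  # CONCURRENTLY cannot run inside a transaction

    dependencies = [("myapp", "0001_initial")]

    operations = [
        AddIndexConcurrently(
            model_name="book",
            index=models.Index(fields=["title"], name="book_title_idx"),
        ),
    ]
```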
  {
    "library": "kornia",
    "name": "max_samples_by_conf",
    "source_code": "@staticmethod\ndef max_samples_by_conf(n_inl: int, num_tc: int, sample_size: int, conf: float) -> float:\n    eps = 1e-09\n    if num_tc <= sample_size:\n        return 1.0\n    if n_inl == num_tc:\n        return 1.0\n    return math.log(1.0 - conf) / min(-eps, math.log(max(eps, 1.0 - math.pow(n_inl / num_tc, sample_size))))",
    "docstring": "Update max_iter to stop iterations earlier",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\ransac.py",
    "ast_data": "FunctionDef name:max_samples_by_conf arg:n_inl arg:num_tc arg:sample_size arg:conf arguments arg arg arg arg Assign If Compare Return return:yes If Compare Return return:yes Return return:yes Call Call Call Call Call"
  },
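The method implements the standard RANSAC iteration bound log(1 - conf) / log(1 - w^m) for inlier ratio w and sample size m; a standalone re-implementation with a worked value:

```python
import math

def max_samples_by_conf(n_inl, num_tc, sample_size, conf):
    eps = 1e-9
    if num_tc <= sample_size or n_inl == num_tc:
        return 1.0
    return math.log(1.0 - conf) / min(
        -eps, math.log(max(eps, 1.0 - (n_inl / num_tc) ** sample_size)))

# 50% inliers, minimal sample of 4 correspondences, 99.9% confidence:
print(max_samples_by_conf(500, 1000, 4, 0.999))  # ~107 iterations
```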
  {
    "library": "tensorflow",
    "name": "_configure_session_config_for_std_servers",
    "source_code": "def _configure_session_config_for_std_servers(strategy, eval_strategy, session_config, cluster_spec, task_type, task_id):\n    if task_type == _TaskType.EVALUATOR:\n        if eval_strategy:\n            eval_strategy.configure(session_config=session_config)\n    else:\n        strategy = copy.deepcopy(strategy)\n        strategy.configure(session_config=session_config, cluster_spec=cluster_spec, task_type=task_type, task_id=task_id)\n    del session_config.device_filters[:]",
    "docstring": "Call strategy's to mutate the session_config. The session_config is currently needed as default config for a TensorFlow server. In the future, we should be able to remove this method and only pass the session config to a client session.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:_configure_session_config_for_std_servers arg:strategy arg:eval_strategy arg:session_config arg:cluster_spec arg:task_type arg:task_id arguments arg arg arg arg arg arg If Compare If Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "cast",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef cast(self, value, cast_context):\n    return super().cast(value, cast_context)",
    "docstring": "See tf.types.experimental.TraceType base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:cast arg:self arg:value arg:cast_context arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "shape",
    "source_code": "@property\ndef shape(self):\n    if self._dense_shape is None:\n        return tensor_shape.TensorShape(None)\n    return tensor_util.constant_value_as_shape(self._dense_shape)",
    "docstring": "Gets the representing the shape of the dense tensor. Returns: A object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py",
    "ast_data": "FunctionDef name:shape arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "enable_static_estimate",
    "source_code": "@torch.jit.export\ndef enable_static_estimate(self):\n    self.toggle_qparam_learning(enabled=False).toggle_fake_quant(enabled=True).toggle_observer_update(enabled=True)",
    "docstring": "Enable static estimates of quantization parameters. Enables static observer estimates and disables learning of quantization parameters. Forward path returns fake quantized X.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\_learnable_fake_quantize.py",
    "ast_data": "FunctionDef name:enable_static_estimate arg:self arguments arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_non_slot_variable",
    "source_code": "def _create_non_slot_variable(self, initial_value, name, colocate_with):\n    eager = ops.executing_eagerly_outside_functions()\n    graph = None if eager else colocate_with.graph\n    key = (name, graph)\n    v = self._non_slot_dict.get(key, None)\n    if v is None:\n        self._maybe_initialize_trackable()\n        distribution_strategy = distribute_lib.get_strategy()\n        with distribution_strategy.extended.colocate_vars_with(colocate_with):\n            if eager:\n                restored_initial_value = self._preload_simple_restoration(name=name)\n                if restored_initial_value is not None:\n                    initial_value = restored_initial_value\n            if self._use_own_namescope_for_non_slot_vars:\n                with ops.name_scope('', skip_on_eager=False):\n                    with variable_scope.variable_scope(self._non_slot_variable_scope):\n                        v = variable_scope.get_variable(initializer=initial_value, name=name, trainable=False, use_resource=resource_variable_ops.is_resource_variable(colocate_with))\n            else:\n                v = variable_v1.VariableV1(initial_value, name=name, trainable=False, use_resource=resource_variable_ops.is_resource_variable(colocate_with))\n        self._handle_deferred_dependencies(name=name, trackable=v)\n        self._non_slot_dict[key] = v\n    return v",
    "docstring": "Add an extra variable, not associated with a slot.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_create_non_slot_variable arg:self arg:initial_value arg:name arg:colocate_with arguments arg arg arg arg Assign Call Assign Assign Assign Call If Compare Call Assign Call With Call If Assign Call If Compare Assign If With Call With Call Assign Call Call Assign Call Call Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_all",
    "source_code": "def get_all(store, rank: int, prefix: str, world_size: int):\n    data_arr = store.multi_get([f'{prefix}{idx}' for idx in range(world_size)])\n    barrier_key = _barrier_nonblocking(store=store, world_size=world_size, key_prefix=f'{prefix}/finished')\n    if rank == 0:\n        store.wait([barrier_key])\n    return data_arr",
    "docstring": "Given a store and a prefix, the method goes through the array of keys of the following format: ``, where idx is in a range from 0 to size, and tries to retrieve the data. The Rank0 process waits at the end to make sure all other processes finished the procedure before exiting. Usage :: values = get_all(store, \"torchelastic/data\", 3) value1 = values[0] # retrieves the data for key torchelastic/data0 value2 = values[1] # retrieves the data for key torchelastic/data1 value3 = values[2] # retrieves the data for key torchelastic/data2",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\store.py",
    "ast_data": "FunctionDef name:get_all arg:store arg:rank arg:prefix arg:world_size arguments arg arg arg arg Assign Call Call Assign Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "graph",
    "source_code": "@property\ndef graph(self):\n    return self._indices.graph",
    "docstring": "The that contains the index, value, and dense_shape tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py",
    "ast_data": "FunctionDef name:graph arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_global_rank",
    "source_code": "@deprecated('`torch.distributed.distributed_c10d._get_global_rank` is deprecated, please use `torch.distributed.distributed_c10d.get_global_rank` instead', category=FutureWarning)\ndef _get_global_rank(group, rank) -> int:\n    return get_global_rank(group, rank)",
    "docstring": "Use get_global_rank as this method is deprecated.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_global_rank arg:group arg:rank arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "to_proto",
    "source_code": "def to_proto(self):\n    nodes = [node_proto(v.debugName, input=v.inputs, outputsize=v.tensor_size, op=v.kind, attributes=v.attributes) for v in self.nodes_io.values()]\n    return nodes",
    "docstring": "Convert graph representation of GraphPy object to TensorBoard required format.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_pytorch_graph.py",
    "ast_data": "FunctionDef name:to_proto arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "to_series",
    "source_code": "@final\ndef to_series(self, index=None, name: Hashable | None=None) -> Series:\n    from pandas import Series\n    if index is None:\n        index = self._view()\n    if name is None:\n        name = self.name\n    return Series(self._values.copy(), index=index, name=name)",
    "docstring": "Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index([\"Ant\", \"Bear\", \"Cow\"], name=\"animal\") By default, the original index and original name is reused. >>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new index, specify new labels to ``: >>> idx.to_series(name=\"zoo\") animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:to_series arg:self arg:index arg:name arguments arg arg arg If Compare Assign Call If Compare Assign Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "__create_chunk_list__",
    "source_code": "def __create_chunk_list__(self) -> list[ChunkStorageMetadata]:\n    return self._storage_meta.chunks",
    "docstring": "For compatibility with DCP, we support creation of chunk lists such that they can be saved properly.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py",
    "ast_data": "FunctionDef name:__create_chunk_list__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "initial_form_count",
    "source_code": "def initial_form_count(self):\n    if self.is_bound:\n        return self.management_form.cleaned_data[INITIAL_FORM_COUNT]\n    else:\n        initial_forms = len(self.initial) if self.initial else 0\n    return initial_forms",
    "docstring": "Return the number of forms that are required in this FormSet.",
    "type": "method",
    "file_path": "django\\django\\forms\\formsets.py",
    "ast_data": "FunctionDef name:initial_form_count arg:self arguments arg If Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "value_from_object",
    "source_code": "def value_from_object(self, obj):\n    return getattr(obj, self.attname)",
    "docstring": "Return the value of this field in the given model instance.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "FunctionDef name:value_from_object arg:self arg:obj arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Sodp",
    "source_code": "class Sodp(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))\n        self.global_optimum = [[0 for _ in range(self.N)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = arange(1, self.N + 1)\n        return sum(abs(x) ** (i + 1))",
    "docstring": "Sodp objective function. This class defines the Sum Of Different Powers [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Sodp}}(x) = \\sum_{i=1}^{n} \\lvert{x_{i}}\\rvert^{i + 1} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py",
    "ast_data": "ClassDef name:Sodp Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_cf",
    "source_code": "def _cf(Phi, t, alpha, beta):\n    return np.exp(-np.abs(t) ** alpha * (1 - 1j * beta * np.sign(t) * Phi(alpha, t)))",
    "docstring": "Characteristic function.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_levy_stable\\__init__.py",
    "ast_data": "FunctionDef name:_cf arg:Phi arg:t arg:alpha arg:beta arguments arg arg arg arg Return return:yes Call Call Call Call"
  },
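A standalone copy of `_cf` with a plugged-in `Phi`; the `Phi` used here (the alpha != 1 branch, tan(pi*alpha/2)) is an assumption for the demo:

```python
import numpy as np

def _cf(Phi, t, alpha, beta):
    # Characteristic function, exactly as in the entry above.
    return np.exp(-np.abs(t) ** alpha
                  * (1 - 1j * beta * np.sign(t) * Phi(alpha, t)))

phi = lambda alpha, t: np.tan(np.pi * alpha / 2.0)  # assumed alpha != 1 branch
print(_cf(phi, 1.0, 2.0, 0.0))  # alpha=2, beta=0 gives exp(-1): Gaussian case
```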
  {
    "library": "pytorch",
    "name": "register_optimizer_step_post_hook",
    "source_code": "def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> RemovableHandle:\n    handle = hooks.RemovableHandle(_global_optimizer_post_hooks)\n    _global_optimizer_post_hooks[handle.id] = hook\n    return handle",
    "docstring": "Register a post hook common to all optimizers. The hook should have the following signature:: hook(optimizer, args, kwargs) -> None Args: hook (Callable): A user defined hook which is registered on all optimizers. Returns: :class:: a handle that can be used to remove the added hook by calling ``",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:register_optimizer_step_post_hook arg:hook arguments arg Assign Call Assign Return return:yes"
  },
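A small usage sketch; `post_step` is made up for illustration:

```python
import torch
from torch.optim.optimizer import register_optimizer_step_post_hook

def post_step(optimizer, args, kwargs):
    print("stepped:", type(optimizer).__name__)

handle = register_optimizer_step_post_hook(post_step)

p = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.SGD([p], lr=0.1)
p.grad = torch.ones(1)
opt.step()        # fires post_step -> "stepped: SGD"
handle.remove()
```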
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, x, nu=None, extrapolate=None):\n    if extrapolate is None:\n        extrapolate = self.extrapolate\n    else:\n        extrapolate = bool(extrapolate)\n    ndim = len(self.x)\n    x = _ndim_coords_from_arrays(x)\n    x_shape = x.shape\n    x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float64)\n    if nu is None:\n        nu = np.zeros((ndim,), dtype=np.intc)\n    else:\n        nu = np.asarray(nu, dtype=np.intc)\n        if nu.ndim != 1 or nu.shape[0] != ndim:\n            raise ValueError('invalid number of derivative orders nu')\n    dim1 = prod(self.c.shape[:ndim])\n    dim2 = prod(self.c.shape[ndim:2 * ndim])\n    dim3 = prod(self.c.shape[2 * ndim:])\n    ks = np.array(self.c.shape[:ndim], dtype=np.intc)\n    out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)\n    self._ensure_c_contiguous()\n    _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3), self.x, ks, x, nu, bool(extrapolate), out)\n    return out.reshape(x_shape[:-1] + self.c.shape[2 * ndim:])",
    "docstring": "Evaluate the piecewise polynomial or its derivative Parameters ---------- x : array-like Points to evaluate the interpolant at. nu : tuple, optional Orders of derivatives to evaluate. Each must be non-negative. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- y : array-like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:nu arg:extrapolate arguments arg arg arg arg If Compare Assign Assign Call Assign Call Assign Call Assign Assign Call Call If Compare Assign Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "skip",
    "source_code": "def skip(self, count, name=None) -> 'DatasetV2':\n    from tensorflow.python.data.ops import skip_op\n    return skip_op._skip(self, count, name)",
    "docstring": "Creates a that skips elements from this dataset. >>> dataset = tf.data.Dataset.range(10) >>> dataset = dataset.skip(7) >>> [a.item() for a in dataset.as_numpy_iterator()] [7, 8, 9] Args: count: A scalar , representing the number of elements of this dataset that should be skipped to form the new dataset. If is greater than the size of this dataset, the new dataset will contain no elements. If is -1, skips the entire dataset. name: (Optional.) A name for the tf.data operation. Returns: A new with the transformation applied as described above.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:skip arg:self arg:count arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "length",
    "source_code": "@property\ndef length(self) -> Index:\n    return self.right - self.left",
    "docstring": "Return an Index with entries denoting the length of each Interval. The length of an interval is calculated as the difference between its and bounds. This property is particularly useful when working with intervals where the size of the interval is an important attribute, such as in time-series analysis or spatial data analysis. See Also -------- arrays.IntervalArray.left : Return the left endpoints of each Interval in the IntervalArray as an Index. arrays.IntervalArray.right : Return the right endpoints of each Interval in the IntervalArray as an Index. arrays.IntervalArray.mid : Return the midpoint of each Interval in the IntervalArray as an Index. Examples -------- >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) >>> interv_arr [(0, 1], (1, 5]] Length: 2, dtype: interval[int64, right] >>> interv_arr.length Index([1, 4], dtype='int64')",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:length arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_bounds",
    "source_code": "def set_bounds(self, *args):\n    if len(args) == 1:\n        l, b, w, h = args[0]\n    else:\n        l, b, w, h = args\n    self._x0 = l\n    self._y0 = b\n    self._width = w\n    self._height = h\n    self.stale = True",
    "docstring": "Set the bounds of the rectangle as *left*, *bottom*, *width*, *height*. The values may be passed as separate parameters or as a tuple:: set_bounds(left, bottom, width, height) set_bounds((left, bottom, width, height)) .. ACCEPTS: (left, bottom, width, height)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_bounds arg:self arguments arg arg If Compare Call Assign Assign Assign Assign Assign Assign Assign"
  },
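Both calling conventions from the docstring, on a throwaway rectangle:

```python
from matplotlib.patches import Rectangle

rect = Rectangle((0, 0), 1, 1)
rect.set_bounds(0.2, 0.3, 0.5, 0.4)    # four separate values
rect.set_bounds((0.2, 0.3, 0.5, 0.4))  # or one 4-tuple
print(rect.get_bbox())                 # Bbox(x0=0.2, y0=0.3, x1=0.7, y1=0.7)
```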
  {
    "library": "tensorflow",
    "name": "event_shape",
    "source_code": "@property\ndef event_shape(self):\n    return tensor_shape.as_shape(self._event_shape())",
    "docstring": "Shape of a single sample from a single batch as a . May be partially defined or unknown. Returns: event_shape: , possibly unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:event_shape arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_CVObjects",
    "source_code": "class _CVObjects(_Constraint):\n\n    def __init__(self):\n        super().__init__()\n        self._constraints = [Interval(Integral, 2, None, closed='left'), HasMethods(['split', 'get_n_splits']), _IterablesNotString(), _NoneConstraint()]\n\n    def is_satisfied_by(self, val):\n        return any((c.is_satisfied_by(val) for c in self._constraints))\n\n    def __str__(self):\n        return f'{', '.join([str(c) for c in self._constraints[:-1]])} or {self._constraints[-1]}'",
    "docstring": "Constraint representing cv objects. Convenient class for [ Interval(Integral, 2, None, closed=\"left\"), HasMethods([\"split\", \"get_n_splits\"]), _IterablesNotString(), None, ]",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py",
    "ast_data": "ClassDef name:_CVObjects FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call Call Call Call FunctionDef name:is_satisfied_by arg:self arg:val arguments arg arg Return return:yes Call Call FunctionDef name:__str__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "set_scale",
    "source_code": "def set_scale(self, scale):\n    self._scale = scale",
    "docstring": "Set the scale parameter.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:set_scale arg:self arg:scale arguments arg arg Assign"
  },
  {
    "library": "scipy",
    "name": "_bessel_poly",
    "source_code": "def _bessel_poly(n, reverse=False):\n    if abs(int(n)) != n:\n        raise ValueError('Polynomial order must be a nonnegative integer')\n    else:\n        n = int(n)\n    out = []\n    for k in range(n + 1):\n        num = _falling_factorial(2 * n - k, n)\n        den = 2 ** (n - k) * math.factorial(k)\n        out.append(num // den)\n    if reverse:\n        return out[::-1]\n    else:\n        return out",
    "docstring": "Return the coefficients of Bessel polynomial of degree If is true, a reverse Bessel polynomial is output. Output is a list of coefficients: [1] = 1 [1, 1] = 1*s + 1 [1, 3, 3] = 1*s^2 + 3*s + 3 [1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15 [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105 etc. Output is a Python list of arbitrary precision long ints, so n is only limited by your hardware's memory. Sequence is and output can be confirmed to match : >>> from scipy.signal._filter_design import _bessel_poly >>> i = 0 >>> for n in range(51): ... for x in _bessel_poly(n, reverse=True): ... print(i, x) ... i += 1",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_filter_design.py",
    "ast_data": "FunctionDef name:_bessel_poly arg:n arg:reverse arguments arg arg If Compare Call Call Raise Call Assign Call Assign For Call Assign Call Assign Call Call If Return return:yes Return return:yes"
  },
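A quick check of the coefficient table from the docstring; the function is private to scipy.signal, so the import is an implementation detail:

```python
from scipy.signal._filter_design import _bessel_poly

print(_bessel_poly(3))                # [1, 6, 15, 15]
print(_bessel_poly(3, reverse=True))  # [15, 15, 6, 1]
```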
  {
    "library": "tensorflow",
    "name": "parse_example_spec",
    "source_code": "@property\ndef parse_example_spec(self):\n    return {self.key: parsing_ops.FixedLenFeature(self.shape, self.dtype, self.default_value)}",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parse_example_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "is_authenticated",
    "source_code": "@property\ndef is_authenticated(self):\n    return True",
    "docstring": "Always return True. This is a way to tell if the user has been authenticated in templates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\base_user.py",
    "ast_data": "FunctionDef name:is_authenticated arg:self arguments arg Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_forward",
    "source_code": "def _forward(self, values: ArrayLike) -> ArrayLike:\n    return values",
    "docstring": "Transform applied to native values before linear mapping into interval.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\properties.py",
    "ast_data": "FunctionDef name:_forward arg:self arg:values arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_next_partition",
    "source_code": "def _get_next_partition(self) -> tuple[int, float]:\n    rank = self._working_tensor_shape.rank\n    if rank is None or rank == 0:\n        return (0, math.inf)\n    num_elems = self._working_tensor_shape.num_elements()\n\n    def num_partitions(axis: int) -> float:\n        axis_len = self._working_tensor_shape.dims[axis].value\n        slice_elems = num_elems // axis_len\n        bytes_per_slice = slice_elems * self._dtype_size\n        slices_per_shard = self._shard_size_remaining // bytes_per_slice\n        if slices_per_shard == 0:\n            return math.inf\n        return math.ceil(axis_len / slices_per_shard)\n    min_parts = num_partitions(0)\n    min_axis = 0\n    for axis in range(1, rank):\n        parts_along_axis = num_partitions(axis)\n        part_size = num_elems * self._dtype_size / parts_along_axis\n        if parts_along_axis < min_parts and part_size <= self._shard_size_remaining:\n            min_axis, min_parts = (axis, int(parts_along_axis))\n    return (min_axis, math.ceil(int(self._working_tensor_shape[min_axis]) / min_parts))",
    "docstring": "Gets tensor partition with size closest to shard_size_remaining. Returns: A tuple containing the axis and size of the next partition.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\sharding\\sharding_policies.py",
    "ast_data": "FunctionDef name:_get_next_partition arg:self arguments arg Assign If BoolOp Compare Compare Return return:yes Assign Call FunctionDef name:num_partitions arg:axis arguments arg Assign Assign Assign Assign If Compare Return return:yes Return return:yes Call Assign Call Assign For Call Assign Call Assign If BoolOp Compare Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "split_by_sparsity",
    "source_code": "def split_by_sparsity(values):\n    dense_values = []\n    dense_indices = []\n    sparse_values = []\n    sparse_indices = []\n    for i, v in enumerate(values):\n        if is_indexed_slices(v):\n            sparse_values.append(v)\n            sparse_indices.append(i)\n        else:\n            dense_values.append(v)\n            dense_indices.append(i)\n    return (dense_values, dense_indices, sparse_values, sparse_indices)",
    "docstring": "Split values into dense and sparse values. Args: values: a list of tensors or s. Returns: Four lists: a list of dense values, a list of their indices in and a list of sparse values, a list of their indices in .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:split_by_sparsity arg:values arguments arg Assign Assign Assign Assign For Call If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "ManyToOneRel",
    "source_code": "class ManyToOneRel(ForeignObjectRel):\n\n    def __init__(self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None):\n        super().__init__(field, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete)\n        self.field_name = field_name\n\n    def __getstate__(self):\n        state = super().__getstate__()\n        state.pop('related_model', None)\n        return state\n\n    @property\n    def identity(self):\n        return (*super().identity, self.field_name)\n\n    def get_related_field(self):\n        field = self.model._meta.get_field(self.field_name)\n        if not field.concrete:\n            raise exceptions.FieldDoesNotExist(\"No related field named '%s'\" % self.field_name)\n        return field\n\n    def set_field_name(self):\n        self.field_name = self.field_name or self.model._meta.pk.name",
    "docstring": "Used by the ForeignKey field to store information about the relation. ``. This is unfortunate but the actual ManyToOneRel class is a private API and there is work underway to turn reverse relations into actual fields.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\reverse_related.py",
    "ast_data": "ClassDef name:ManyToOneRel FunctionDef name:__init__ arg:self arg:field arg:to arg:field_name arg:related_name arg:related_query_name arg:limit_choices_to arg:parent_link arg:on_delete arguments arg arg arg arg arg arg arg arg arg Call Call Assign FunctionDef name:__getstate__ arg:self arguments arg Assign Call Call Call Return return:yes FunctionDef name:identity arg:self arguments arg Return return:yes Call FunctionDef name:get_related_field arg:self arguments arg Assign Call If Raise Call Return return:yes FunctionDef name:set_field_name arg:self arguments arg Assign BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "remove_temp_dir_with_filepath",
    "source_code": "def remove_temp_dir_with_filepath(filepath, strategy):\n    remove_temp_dirpath(os.path.dirname(filepath), strategy)",
    "docstring": "Removes the temp path for file after writing is finished. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_file_utils.py",
    "ast_data": "FunctionDef name:remove_temp_dir_with_filepath arg:filepath arg:strategy arguments arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_ipc_handle",
    "source_code": "@classmethod\ndef from_ipc_handle(cls, device, handle):\n    return super().from_ipc_handle(device, handle)",
    "docstring": "Reconstruct an event from an IPC handle on the given device.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:from_ipc_handle arg:cls arg:device arg:handle arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "MethodsV0",
    "source_code": "class MethodsV0(Benchmark):\n    params = [['__abs__', '__neg__', '__pos__'], TYPES1]\n    param_names = ['methods', 'npdtypes']\n    timeout = 10\n\n    def setup(self, methname, npdtypes):\n        values = get_squares_()\n        self.xarg = values.get(npdtypes)[0]\n\n    def time_ndarray_meth(self, methname, npdtypes):\n        getattr(operator, methname)(self.xarg)",
    "docstring": "Benchmark for the methods which do not take any arguments",
    "type": "class",
    "file_path": "numpy\\benchmarks\\benchmarks\\bench_ufunc.py",
    "ast_data": "ClassDef name:MethodsV0 Assign Assign Assign FunctionDef name:setup arg:self arg:methname arg:npdtypes arguments arg arg arg Assign Call Assign Call FunctionDef name:time_ndarray_meth arg:self arg:methname arg:npdtypes arguments arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "reset_max_memory_cached",
    "source_code": "def reset_max_memory_cached(device: 'Device'=None) -> None:\n    warnings.warn('torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, which resets /all/ peak memory stats.', FutureWarning)\n    return reset_peak_memory_stats(device=device)",
    "docstring": "Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device. See :func: for details. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `~torch.cuda.reset_peak_memory_statscuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:reset_max_memory_cached arg:device arguments arg Call Return return:yes Call"
  },
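Because the body above forwards to `torch.cuda.reset_peak_memory_stats` (as its FutureWarning states), a minimal sketch of the modern pattern looks like this:

```python
import torch

if torch.cuda.is_available():
    x = torch.randn(1024, 1024, device='cuda')
    print(torch.cuda.max_memory_reserved())  # peak bytes held by the caching allocator
    # Preferred call; reset_max_memory_cached merely forwards here and resets
    # *all* peak stats, not just the cached-memory counter.
    torch.cuda.reset_peak_memory_stats()
```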
  {
    "library": "pytorch",
    "name": "_set_unbacked_bindings",
    "source_code": "def _set_unbacked_bindings(out: object, out_proxy: _NestedProxys) -> None:\n    from .symbolic_shapes import compute_unbacked_bindings\n    fake_mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE)\n    if fake_mode and fake_mode.shape_env:\n        if (symbol_to_path := compute_unbacked_bindings(fake_mode.shape_env, out)):\n            assert isinstance(out_proxy, Proxy), out_proxy\n            out_proxy.node.meta['unbacked_bindings'] = symbol_to_path",
    "docstring": "A helper function for setting up unbacked_bindings on the destination FX graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py",
    "ast_data": "FunctionDef name:_set_unbacked_bindings arg:out arg:out_proxy arguments arg arg Assign Call If BoolOp If Call Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_epoch",
    "source_code": "def set_epoch(epoch):\n    global _epoch\n    if _epoch is not None:\n        raise RuntimeError('set_epoch must be called before dates plotted.')\n    _epoch = epoch",
    "docstring": "Set the epoch (origin for dates) for datetime calculations. The default epoch is :rc:. If microsecond accuracy is desired, the date being plotted needs to be within approximately 70 years of the epoch. Matplotlib internally represents dates as days since the epoch, so floating point dynamic range needs to be within a factor of 2^52. must be called before any dates are converted (i.e. near the import section) or a RuntimeError will be raised. See also :doc:. Parameters ---------- epoch : str valid UTC date parsable by (do not include timezone).",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "FunctionDef name:set_epoch arg:epoch arguments arg If Compare Raise Call Assign"
  },
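A small sketch of the call order the docstring requires: `set_epoch` runs right after the imports, before any date is converted:

```python
import datetime
import matplotlib.dates as mdates

# Must happen before any date conversion, or a RuntimeError is raised.
mdates.set_epoch('2020-01-01T00:00:00')

# Dates are now measured in days since the new epoch.
print(mdates.date2num(datetime.datetime(2020, 1, 2)))  # 1.0
```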
  {
    "library": "scipy",
    "name": "single",
    "source_code": "@lazy_cython\ndef single(y):\n    return linkage(y, method='single', metric='euclidean')",
    "docstring": "Perform single/min/nearest linkage on the condensed distance matrix `scipy.cluster.hierarchy.linkagescipy.cluster.hierarchy.fclusterscipy.cluster.hierarchy.dendrogram` can be used to generate a plot of the dendrogram.",
    "type": "function",
    "file_path": "scipy\\scipy\\cluster\\hierarchy.py",
    "ast_data": "FunctionDef name:single arg:y arguments arg Return return:yes Call"
  },
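A runnable example of the linkage pipeline referenced above, pairing `single` with `pdist` and `fcluster`:

```python
import numpy as np
from scipy.cluster.hierarchy import single, fcluster
from scipy.spatial.distance import pdist

# Two well-separated blobs in the plane.
points = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1],
                   [5.0, 5.0], [5.1, 5.0], [5.0, 5.1]])
y = pdist(points)   # condensed distance matrix, shape (n*(n-1)/2,)
Z = single(y)       # single/min/nearest-linkage matrix
print(fcluster(Z, t=2, criterion='maxclust'))  # one label per blob, e.g. [1 1 1 2 2 2]
```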
  {
    "library": "tensorflow",
    "name": "_create_per_worker_variables",
    "source_code": "def _create_per_worker_variables(self, fn, args=None, kwargs=None):\n    results = []\n    for w in self._cluster.workers:\n        results.append(w.create_variable_resource(fn, args=args, kwargs=kwargs))\n    return PerWorkerValues(tuple(results))",
    "docstring": "Asynchronously create variables on workers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:_create_per_worker_variables arg:self arg:fn arg:args arg:kwargs arguments arg arg arg arg Assign For Call Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_to_scalar",
    "source_code": "def _to_scalar(x):\n    if isinstance(x, torch.Tensor) and x.dim() != 0:\n        return x.squeeze()\n    else:\n        return x",
    "docstring": "This function converts a hyperparameter to a 0-dimension (scalar) tensor if it is a nonzero-dimensions 1-element tensor. If it is not a tensor, it is kept as is. Args: x (float or Tensor): A hyperparameter of the optimizer. If it is Tensor, it is needed to be 1-element. Returns: float or Tensor: a scalar tensor if x is Tensor otherwise Python scalar (float) value.",
    "type": "function",
    "file_path": "pytorch\\torch\\optim\\optimizer.py",
    "ast_data": "FunctionDef name:_to_scalar arg:x arguments arg If BoolOp Call Compare Call Return return:yes Call Return return:yes"
  },
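A quick demonstration of the behavior described in the docstring; the private import mirrors the file path above and is illustrative only:

```python
import torch
from torch.optim.optimizer import _to_scalar

print(_to_scalar(torch.tensor([0.01])).dim())  # 0 -- 1-element 1-D tensor is squeezed
print(_to_scalar(torch.tensor(0.01)).dim())    # 0 -- already a scalar, returned as-is
print(_to_scalar(0.01))                        # 0.01 -- plain floats pass through
```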
  {
    "library": "tensorflow",
    "name": "has_barrier",
    "source_code": "@property\ndef has_barrier(self):\n    return self._worker_barrier is not None",
    "docstring": "Whether the barrier is set or not.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py",
    "ast_data": "FunctionDef name:has_barrier arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "axis_date",
    "source_code": "def axis_date(self, tz=None):\n    if isinstance(tz, str):\n        import dateutil.tz\n        tz = dateutil.tz.gettz(tz)\n    self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))",
    "docstring": "Set up axis ticks and labels to treat data along this Axis as dates. Parameters ---------- tz : str or , default: :rc: The timezone used to create date labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:axis_date arg:self arg:tz arguments arg arg If Call Assign Call Call Call"
  },
  {
    "library": "scipy",
    "name": "ifft2",
    "source_code": "def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False):\n    return ifftn(x, shape, axes, overwrite_x)",
    "docstring": "2-D discrete inverse Fourier transform of real or complex sequence. Return inverse 2-D discrete Fourier transform of arbitrary type sequence x. See for more information. See Also -------- fft2, ifft Examples -------- >>> import numpy as np >>> from scipy.fftpack import fft2, ifft2 >>> y = np.mgrid[:5, :5][0] >>> y array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]]) >>> np.allclose(y, fft2(ifft2(y))) True",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_basic.py",
    "ast_data": "FunctionDef name:ifft2 arg:x arg:shape arg:axes arg:overwrite_x arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "FixedLayout",
    "source_code": "class FixedLayout(Layout):\n\n    def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:\n\n        def indexer(index):\n            assert len(index) == len(self.stride)\n            assert len(index) == len(self.size)\n            result = self.offset\n            for idx, stride, sz in zip(index, self.stride, self.size):\n                if sz != 1:\n                    result = result + idx * stride\n            return result\n        return indexer",
    "docstring": "A Tensor layout we cannot change",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\ir.py",
    "ast_data": "ClassDef name:FixedLayout FunctionDef name:make_indexer arg:self arguments arg FunctionDef name:indexer arg:index arguments arg Compare Call Call Compare Call Call Assign For Call If Compare Assign Return return:yes Return return:yes"
  },
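The closure above computes the standard strided-offset formula `offset + sum(index[i] * stride[i])`, skipping size-1 dimensions. A standalone sketch with plain integers (Inductor itself uses symbolic sympy expressions):

```python
def make_indexer(size, stride, offset=0):
    def indexer(index):
        assert len(index) == len(size) == len(stride)
        result = offset
        for idx, st, sz in zip(index, stride, size):
            if sz != 1:  # size-1 dims contribute nothing, as in FixedLayout
                result += idx * st
        return result
    return indexer

# Row-major (C-contiguous) 3x4 layout has strides (4, 1).
idx = make_indexer(size=(3, 4), stride=(4, 1))
print(idx((2, 3)))  # 11 == 2*4 + 3*1, the flat position of element [2, 3]
```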
  {
    "library": "tensorflow",
    "name": "get_input_mask_at",
    "source_code": "@doc_controls.do_not_doc_inheritable\ndef get_input_mask_at(self, node_index):\n    inputs = self.get_input_at(node_index)\n    if isinstance(inputs, list):\n        return [getattr(x, '_keras_mask', None) for x in inputs]\n    else:\n        return getattr(inputs, '_keras_mask', None)",
    "docstring": "Retrieves the input mask tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first time the layer was called. Returns: A mask tensor (or list of tensors if the layer has multiple inputs).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:get_input_mask_at arg:self arg:node_index arguments arg arg Assign Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "default_mesh",
    "source_code": "@tf_export('experimental.dtensor.default_mesh', v1=[])\n@contextlib.contextmanager\ndef default_mesh(mesh: layout_lib.Mesh):\n    if not isinstance(mesh, layout_lib.Mesh):\n        raise ValueError(f'Expect `mesh` to be `Mesh`, got {type(mesh)}')\n    with _dtensor_device()._experimental_default_mesh(mesh):\n        with ops.device(device_name()):\n            yield",
    "docstring": "Sets the default DTensor device mesh to use for enclosed functions. This function returns a scope. All the ops and tf.functions in this scope will default to this DTensor mesh if a mesh cannot be inferred from any of the inputs This is useful for wrapping any tf.function that doesn't take a DTensor as input but would like to produce DTensor as result. The scope will also make sure all small constants are replicated as DTensors. Args: mesh: A Mesh instance to extract a default mesh from. Yields: A context in which all ops and tf.functions will run on the given mesh.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py",
    "ast_data": "FunctionDef name:default_mesh arg:mesh arguments arg If Call Raise Call Call With Call Call With Call Call Call"
  },
  {
    "library": "scipy",
    "name": "eliminate_zeros",
    "source_code": "def eliminate_zeros(self):\n    mask = self.data != 0\n    self.data = self.data[mask]\n    self.coords = tuple((idx[mask] for idx in self.coords))",
    "docstring": "Remove zero entries from the array/matrix This is an *in place* operation",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_coo.py",
    "ast_data": "FunctionDef name:eliminate_zeros arg:self arguments arg Assign Compare Assign Assign Call"
  },
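An example of the in-place semantics on a COO array that stores an explicit zero:

```python
import numpy as np
from scipy.sparse import coo_array

data = np.array([1.0, 0.0, 3.0])           # middle entry is an explicit zero
coords = (np.array([0, 1, 2]), np.array([0, 1, 2]))
a = coo_array((data, coords), shape=(3, 3))

print(a.nnz)          # 3 -- the stored zero still counts as an entry
a.eliminate_zeros()   # in-place: filters data and coords by a `data != 0` mask
print(a.nnz)          # 2
```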
  {
    "library": "matplotlib",
    "name": "get_pull_request_files",
    "source_code": "def get_pull_request_files(project, num, auth=False):\n    url = f'https://api.github.com/repos/{project}/pulls/{num}/files'\n    if auth:\n        header = make_auth_header()\n    else:\n        header = None\n    return get_paged_request(url, headers=header)",
    "docstring": "get list of files in a pull request",
    "type": "function",
    "file_path": "matplotlib\\tools\\gh_api.py",
    "ast_data": "FunctionDef name:get_pull_request_files arg:project arg:num arg:auth arguments arg arg arg Assign If Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "tfdbg_run_id",
    "source_code": "def tfdbg_run_id(self):\n    return self._reader.tfdbg_run_id()",
    "docstring": "Get the debugger run ID of the debugged TensorFlow program.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:tfdbg_run_id arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_sample_odds_ratio",
    "source_code": "def _sample_odds_ratio(table):\n    if table[1, 0] > 0 and table[0, 1] > 0:\n        oddsratio = table[0, 0] * table[1, 1] / (table[1, 0] * table[0, 1])\n    elif table[0, 0] == 0 or table[1, 1] == 0:\n        oddsratio = np.nan\n    else:\n        oddsratio = np.inf\n    return oddsratio",
    "docstring": "Given a table [[a, b], [c, d]], compute a*d/(b*c). Return nan if the numerator and denominator are 0. Return inf if just the denominator is 0.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_odds_ratio.py",
    "ast_data": "FunctionDef name:_sample_odds_ratio arg:table arguments arg If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign Assign Return return:yes"
  },
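The three branches above can be exercised directly; the private import follows the file path listed and is for illustration only:

```python
import numpy as np
from scipy.stats._odds_ratio import _sample_odds_ratio

print(_sample_odds_ratio(np.array([[10, 2], [4, 8]])))  # 10.0 == (10*8)/(2*4)
print(_sample_odds_ratio(np.array([[10, 0], [4, 8]])))  # inf -- only b*c is 0
print(_sample_odds_ratio(np.array([[0, 0], [4, 8]])))   # nan -- a*d and b*c both 0
```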
  {
    "library": "tensorflow",
    "name": "output_all_intermediates",
    "source_code": "def output_all_intermediates():\n    if _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE is not None:\n        return _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE\n    if in_defun():\n        return False\n    if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):\n        return False\n    if context.context().function_call_options.executor_type == 'SINGLE_THREADED_EXECUTOR':\n        return False\n    return _is_building_keras_layer()",
    "docstring": "Whether to output all intermediates of a functional control flow op. The default behavior is to output intermediates only when building a Keras Layer in graph mode and that too when certain other conditions are met: 1. We do not output intermediates if the functional control flow op is being built inside a FuncGraph which is not a If/While graph. This guards against outputting intermediates in eager mode since keras adds tensors to a FuncGraph named \"keras_graph\" in that case. Also because we do not output intermediates of tf.function (since this feature is only for backwards compatibility) outputting intermediates of functional control flow ops built inside tf.function is of no value. 2. We do not output intermediates when the compilation is using XLA or for a TPU. 3. We do not output intermediates when a single threaded executor is used since that does not perform inlining and pruning. Returns: A bool telling whether to output all intermediates.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py",
    "ast_data": "FunctionDef name:output_all_intermediates arguments If Compare Return return:yes If Call Return return:yes If Call Call Return return:yes If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "ChiefSessionCreator",
    "source_code": "@tf_export(v1=['train.ChiefSessionCreator'])\nclass ChiefSessionCreator(SessionCreator):\n\n    def __init__(self, scaffold=None, master='', config=None, checkpoint_dir=None, checkpoint_filename_with_path=None):\n        self._checkpoint_dir = checkpoint_dir\n        self._checkpoint_filename_with_path = checkpoint_filename_with_path\n        self._scaffold = scaffold or Scaffold()\n        self._session_manager = None\n        self._master = master\n        self._config = config\n\n    def _get_session_manager(self):\n        if self._session_manager:\n            return self._session_manager\n        self._session_manager = sm.SessionManager(local_init_op=self._scaffold.local_init_op, local_init_feed_dict=self._scaffold.local_init_feed_dict, ready_op=self._scaffold.ready_op, ready_for_local_init_op=self._scaffold.ready_for_local_init_op, graph=ops.get_default_graph())\n        return self._session_manager\n\n    def create_session(self):\n        self._scaffold.finalize()\n        return self._get_session_manager().prepare_session(self._master, saver=self._scaffold.saver, checkpoint_dir=self._checkpoint_dir, checkpoint_filename_with_path=self._checkpoint_filename_with_path, config=self._config, init_op=self._scaffold.init_op, init_feed_dict=self._scaffold.init_feed_dict, init_fn=self._scaffold.init_fn)",
    "docstring": "Creates a tf.compat.v1.Session for a chief.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "ClassDef name:ChiefSessionCreator FunctionDef name:__init__ arg:self arg:scaffold arg:master arg:config arg:checkpoint_dir arg:checkpoint_filename_with_path arguments arg arg arg arg arg arg Assign Assign Assign BoolOp Call Assign Assign Assign FunctionDef name:_get_session_manager arg:self arguments arg If Return return:yes Assign Call Call Return return:yes FunctionDef name:create_session arg:self arguments arg Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "add_event_data",
    "source_code": "def add_event_data(self, event_name: str, **kwargs) -> None:\n    if event_name not in self.get_stack():\n        raise RuntimeError(f\"Event {repr(event_name)} not in {self.get_stack()}. Cannot add metadata to events that aren't in progress. Please make sure the event has started and hasn't ended.\")\n    event_data = self.get_event_data()\n    if event_name not in event_data:\n        event_data[event_name] = {}\n    event_data[event_name].update(kwargs)",
    "docstring": "Adds additional metadata info to an in-progress event This metadata is recorded in the END event",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:add_event_data arg:self arg:event_name arguments arg arg arg If Compare Call Raise Call Call Call Assign Call If Compare Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "autofmt_xdate",
    "source_code": "def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which='major'):\n    _api.check_in_list(['major', 'minor', 'both'], which=which)\n    axes = [ax for ax in self.axes if ax._label != '<colorbar>']\n    allsubplots = all((ax.get_subplotspec() for ax in axes))\n    if len(axes) == 1:\n        for label in self.axes[0].get_xticklabels(which=which):\n            label.set_ha(ha)\n            label.set_rotation(rotation)\n    elif allsubplots:\n        for ax in axes:\n            if ax.get_subplotspec().is_last_row():\n                for label in ax.get_xticklabels(which=which):\n                    label.set_ha(ha)\n                    label.set_rotation(rotation)\n            else:\n                for label in ax.get_xticklabels(which=which):\n                    label.set_visible(False)\n                ax.set_xlabel('')\n    engine = self.get_layout_engine()\n    if allsubplots and (engine is None or engine.adjust_compatible):\n        self.subplots_adjust(bottom=bottom)\n    self.stale = True",
    "docstring": "Date ticklabels often overlap, so it is useful to rotate them and right align them. Also, a common use case is a number of subplots with shared x-axis where the x-axis is date data. The ticklabels are often long, and it helps to rotate them on the bottom subplot and turn them off on other subplots, as well as turn off xlabels. Parameters ---------- bottom : float, default: 0.2 The bottom of the subplots for . rotation : float, default: 30 degrees The rotation angle of the xtick labels in degrees. ha : {'left', 'center', 'right'}, default: 'right' The horizontal alignment of the xticklabels. which : {'major', 'minor', 'both'}, default: 'major' Selects which ticklabels to rotate.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:autofmt_xdate arg:self arg:bottom arg:rotation arg:ha arg:which arguments arg arg arg arg arg Call Assign Compare Assign Call Call If Compare Call For Call Call Call If For If Call Call For Call Call Call For Call Call Call Assign Call If BoolOp BoolOp Compare Call Assign"
  },
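Typical use on a date axis, matching the defaults documented above:

```python
import datetime
import matplotlib.pyplot as plt

dates = [datetime.date(2024, 1, 1) + datetime.timedelta(days=30 * i)
         for i in range(12)]
fig, ax = plt.subplots()
ax.plot(dates, range(12))
fig.autofmt_xdate(rotation=45, ha='right')  # rotate and right-align tick labels
plt.show()
```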
  {
    "library": "tensorflow",
    "name": "build_shuffle_all_reduce",
    "source_code": "def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):\n    input_tensors, shape = _flatten_tensors(input_tensors)\n    dst_devices = [t.device for t in input_tensors]\n    reduced_shards = _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op)\n    output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices)\n    if len(shape) != 1:\n        output_tensors = _reshape_tensors(output_tensors, shape)\n    return output_tensors",
    "docstring": "Construct a subgraph for shuffle all-reduce. Shuffle reduce is essentially the algorithm implemented when using parameter servers. Suppose tensor length is n, there are d devices and g gather shards. Each device sends a n/g length sub-tensor to each gather shard. The gather shards perform a reduction across d fragments, then broadcast the result back to each device. The devices then join the g fully reduced fragments they receive from the shards. The gather shards could perform d-1 pairwise reductions, or one d-way reduction. The first is better where reduction Op time is low compared to transmission time, the second better in the other case. Args: input_tensors: list of values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: an n-array elementwise reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of which are the fully reduced tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_shuffle_all_reduce arg:input_tensors arg:gather_devices arg:red_op arg:un_op arguments arg arg arg arg Assign Call Assign Assign Call Assign Call If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_check_precomputed_gram_matrix",
    "source_code": "def _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale, rtol=None, atol=1e-05):\n    n_features = X.shape[1]\n    f1 = n_features // 2\n    f2 = min(f1 + 1, n_features - 1)\n    v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]\n    v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]\n    expected = np.dot(v1, v2)\n    actual = precompute[f1, f2]\n    dtypes = [precompute.dtype, expected.dtype]\n    if rtol is None:\n        rtols = [0.0001 if dtype == np.float32 else 1e-07 for dtype in dtypes]\n        rtol = max(rtols)\n    if not np.isclose(expected, actual, rtol=rtol, atol=atol):\n        raise ValueError(f\"Gram matrix passed in via 'precompute' parameter did not pass validation when a single element was checked - please check that it was computed properly. For element ({f1},{f2}) we computed {expected} but the user-supplied value was {actual}.\")",
    "docstring": "Computes a single element of the gram matrix and compares it to the corresponding element of the user supplied gram matrix. If the values do not match a ValueError will be thrown. Parameters ---------- X : ndarray of shape (n_samples, n_features) Data array. precompute : array-like of shape (n_features, n_features) User-supplied gram matrix. X_offset : ndarray of shape (n_features,) Array of feature means used to center design matrix. X_scale : ndarray of shape (n_features,) Array of feature scale factors used to normalize design matrix. rtol : float, default=None Relative tolerance; see numpy.allclose If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7 otherwise. atol : float, default=1e-5 absolute tolerance; see :func. Note that the default here is more tolerant than the default for :func:, where . Raises ------ ValueError Raised when the provided Gram matrix is not consistent.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py",
    "ast_data": "FunctionDef name:_check_precomputed_gram_matrix arg:X arg:precompute arg:X_offset arg:X_scale arg:rtol arg:atol arguments arg arg arg arg arg arg Assign Assign Assign Call Assign Assign Assign Call Assign Assign If Compare Assign Compare Assign Call If Call Raise Call"
  },
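A minimal numeric sketch of the validation idea: one sampled element of the Gram matrix of the centered, scaled design matrix must agree with the user-supplied value (the names here are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 4))
X_offset = X.mean(axis=0)
X_scale = np.ones(X.shape[1])

Xc = (X - X_offset) * X_scale
gram = Xc.T @ Xc                  # what a caller would pass as `precompute`

f1 = X.shape[1] // 2              # the validator samples element (f1, f2)
f2 = min(f1 + 1, X.shape[1] - 1)
expected = np.dot(Xc[:, f1], Xc[:, f2])
assert np.isclose(expected, gram[f1, f2], rtol=1e-7, atol=1e-5)
```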
  {
    "library": "tensorflow",
    "name": "_process_encoded_graph_def_in_chunks",
    "source_code": "def _process_encoded_graph_def_in_chunks(self, event, graph_def_chunks):\n    graph_def = graph_pb2.GraphDef()\n    index_bar_0 = event.graph_def.find(b'|')\n    index_bar_1 = event.graph_def.find(b'|', index_bar_0 + 1)\n    index_bar_2 = event.graph_def.find(b'|', index_bar_1 + 1)\n    graph_def_hash_device_timestamp = event.graph_def[:index_bar_0]\n    chunk_index = int(event.graph_def[index_bar_0 + 1:index_bar_1])\n    num_chunks = int(event.graph_def[index_bar_1 + 1:index_bar_2])\n    if graph_def_hash_device_timestamp not in graph_def_chunks:\n        graph_def_chunks[graph_def_hash_device_timestamp] = [None] * num_chunks\n    graph_def_chunks[graph_def_hash_device_timestamp][chunk_index] = event.graph_def[index_bar_2 + 1:]\n    if all(graph_def_chunks[graph_def_hash_device_timestamp]):\n        device_name = graph_def_hash_device_timestamp.split(b',')[1]\n        wall_time = int(graph_def_hash_device_timestamp.split(b',')[2])\n        graph_def.ParseFromString(b''.join(graph_def_chunks[graph_def_hash_device_timestamp]))\n        del graph_def_chunks[graph_def_hash_device_timestamp]\n        self._process_graph_def(graph_def)\n        return (graph_def, device_name, wall_time)\n    else:\n        return (None, None, None)",
    "docstring": "Process an Event proto containing a chunk of encoded GraphDef. Args: event: the Event proto containing the chunk of encoded GraphDef. graph_def_chunks: A dict mapping keys for GraphDefs (i.e., \",,\") to a list of chunks of encoded GraphDefs. Returns: If all chunks of the GraphDef have arrived, return decoded GraphDef proto, device name, wall_time. Otherwise, return None, None, None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\grpc_debug_server.py",
    "ast_data": "FunctionDef name:_process_encoded_graph_def_in_chunks arg:self arg:event arg:graph_def_chunks arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Assign Assign Call Assign Call If Compare Assign Assign If Call Assign Call Assign Call Call Call Call Call Return return:yes Return return:no"
  },
  {
    "library": "pandas",
    "name": "sp_index",
    "source_code": "@property\ndef sp_index(self) -> SparseIndex:\n    return self._sparse_index",
    "docstring": "The SparseIndex containing the location of non- `` points.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:sp_index arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "TextLineDatasetV1",
    "source_code": "@tf_export(v1=['data.TextLineDataset'])\nclass TextLineDatasetV1(dataset_ops.DatasetV1Adapter):\n\n    def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None, name=None):\n        wrapped = TextLineDatasetV2(filenames, compression_type, buffer_size, num_parallel_reads, name)\n        super(TextLineDatasetV1, self).__init__(wrapped)\n    __init__.__doc__ = TextLineDatasetV2.__init__.__doc__\n\n    @property\n    def _filenames(self):\n        return self._dataset._filenames\n\n    @_filenames.setter\n    def _filenames(self, value):\n        self._dataset._filenames = value",
    "docstring": "A comprising lines from one or more text files.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\readers.py",
    "ast_data": "ClassDef name:TextLineDatasetV1 FunctionDef name:__init__ arg:self arg:filenames arg:compression_type arg:buffer_size arg:num_parallel_reads arg:name arguments arg arg arg arg arg arg Assign Call Call Call Assign FunctionDef name:_filenames arg:self arguments arg Return return:yes FunctionDef name:_filenames arg:self arg:value arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "DeterministicAlgorithmsVariable",
    "source_code": "class DeterministicAlgorithmsVariable(ContextWrappingVariable):\n    _guards_singleton = Guard(GlobalStateSource(), GuardBuilder.DETERMINISTIC_ALGORITHMS)\n\n    @staticmethod\n    def create(tx: 'InstructionTranslator', target_value, **kwargs):\n        var = DeterministicAlgorithmsVariable(target_values=[target_value], initial_values=[torch.are_deterministic_algorithms_enabled()], **kwargs)\n        var._call_func(tx, [target_value])\n        var.set_cleanup_hook(tx)\n        return var\n\n    def __init__(self, target_values, initial_values=None, **kwargs) -> None:\n        super().__init__(target_values=target_values, initial_values=initial_values, **kwargs)\n        install_guard(self._guards_singleton)\n\n    def enter(self, tx):\n        return variables.ConstantVariable.create(None)\n\n    def _call_func(self, tx: 'InstructionTranslator', values):\n        assert len(values) == 1\n        value = values[0]\n        (tx.output.create_node('call_function', torch._C._set_deterministic_algorithms, (value,), {}),)\n        torch._C._set_deterministic_algorithms(value)\n\n    def module_name(self):\n        return 'torch'\n\n    def fn_name(self):\n        return 'use_deterministic_algorithms'",
    "docstring": "represents torch.{are_deterministic_algorithms_enabled,use_deterministic_algorithms}()",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\ctx_manager.py",
    "ast_data": "ClassDef name:DeterministicAlgorithmsVariable Assign Call Call FunctionDef name:create arg:tx arg:target_value arguments arg arg arg Assign Call Call Call Call Return return:yes FunctionDef name:__init__ arg:self arg:target_values arg:initial_values arguments arg arg arg arg Call Call Call FunctionDef name:enter arg:self arg:tx arguments arg arg Return return:yes Call FunctionDef name:_call_func arg:self arg:tx arg:values arguments arg arg arg Compare Call Assign Call Call FunctionDef name:module_name arg:self arguments arg Return return:yes FunctionDef name:fn_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "chunk_vmap",
    "source_code": "def chunk_vmap(func: Callable, in_dims: in_dims_t=0, out_dims: out_dims_t=0, randomness: str='error', chunks=2) -> Callable:\n    _check_randomness_arg(randomness)\n    if chunks == 1:\n        return vmap(func, in_dims=in_dims, out_dims=out_dims, randomness=randomness)\n\n    def _get_chunk_flat_args(flat_args_, flat_in_dims_, chunks_):\n        flat_args_chunks = tuple((t.chunk(chunks_, dim=in_dim) if in_dim is not None else [t] * chunks_ for t, in_dim in zip(flat_args_, flat_in_dims_)))\n        chunks_flat_args = zip(*flat_args_chunks)\n        return chunks_flat_args\n\n    @functools.wraps(func)\n    def wrapped_with_chunks(*args, **kwargs):\n        _check_out_dims_is_int_or_int_pytree(out_dims, func)\n        _, flat_in_dims, flat_args, args_spec = _process_batched_inputs(in_dims, args, func)\n        chunks_flat_args = _get_chunk_flat_args(flat_args, flat_in_dims, chunks)\n        return _chunked_vmap(func, flat_in_dims, chunks_flat_args, args_spec, out_dims, randomness, **kwargs)\n    return wrapped_with_chunks",
    "docstring": "chunk_vmap is the vectorizing map (vmap) using chunks of input data. It is a mix of vmap (which vectorizes everything) and map (which executes things sequentially). `vmapvmapvmap`.",
    "type": "function",
    "file_path": "pytorch\\torch\\_functorch\\apis.py",
    "ast_data": "FunctionDef name:chunk_vmap arg:func arg:in_dims arg:out_dims arg:randomness arg:chunks arguments arg arg arg arg arg Call If Compare Return return:yes Call FunctionDef name:_get_chunk_flat_args arg:flat_args_ arg:flat_in_dims_ arg:chunks_ arguments arg arg arg Assign Call Compare Call Call Assign Call Return return:yes FunctionDef name:wrapped_with_chunks arguments arg arg Call Assign Call Assign Call Return return:yes Call Call Return return:yes"
  },
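A usage sketch; the private `torch._functorch.apis` import follows the file path above and may move between PyTorch versions:

```python
import torch
from torch._functorch.apis import chunk_vmap

def f(x):
    return x.sin() + x.cos()

x = torch.randn(1000)
# Vectorize over dim 0, but feed vmap 4 sequential chunks of 250 elements,
# trading peak memory for extra sequential passes.
out = chunk_vmap(f, in_dims=0, chunks=4)(x)
assert torch.allclose(out, f(x))
```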
  {
    "library": "tensorflow",
    "name": "resource_handle",
    "source_code": "@property\ndef resource_handle(self):\n    if self._resource_handle is None:\n        with ops.device(self._resource_device):\n            self._resource_handle = self._create_resource()\n    return self._resource_handle",
    "docstring": "Returns the resource handle associated with this Resource.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "FunctionDef name:resource_handle arg:self arguments arg If Compare With Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ProcessRaisedException",
    "source_code": "class ProcessRaisedException(ProcessException):\n\n    def __init__(self, msg: str, error_index: int, error_pid: int):\n        super().__init__(msg, error_index, error_pid)",
    "docstring": "Exception raised when a process failed due to an exception raised by the code.",
    "type": "class",
    "file_path": "pytorch\\torch\\multiprocessing\\spawn.py",
    "ast_data": "ClassDef name:ProcessRaisedException FunctionDef name:__init__ arg:self arg:msg arg:error_index arg:error_pid arguments arg arg arg arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_WrappedTritonKernel",
    "source_code": "class _WrappedTritonKernel:\n\n    def __init__(self, kernel):\n        self.kernel = kernel\n        self.kernel_invoked = False\n\n    def __call__(self, *args, **kwargs):\n        res = self.kernel(*args, **kwargs)\n        self.kernel_invoked = True\n        return res",
    "docstring": "Just a simple wrapper to store some metadata for testing purposes.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\__init__.py",
    "ast_data": "ClassDef name:_WrappedTritonKernel FunctionDef name:__init__ arg:self arg:kernel arguments arg arg Assign Assign FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "solve_event_equation",
    "source_code": "def solve_event_equation(event, sol, t_old, t):\n    from scipy.optimize import brentq\n    return brentq(lambda t: event(t, sol(t)), t_old, t, xtol=4 * EPS, rtol=4 * EPS)",
    "docstring": "Solve an equation corresponding to an ODE event. The equation is `scipy.optimize.brentqt_oldt`. t_old, t : float Previous and new values of time. They will be used as a bracketing interval. Returns ------- root : float Found solution.",
    "type": "function",
    "file_path": "scipy\\scipy\\integrate\\_ivp\\ivp.py",
    "ast_data": "FunctionDef name:solve_event_equation arg:event arg:sol arg:t_old arg:t arguments arg arg arg arg Return return:yes Call arguments arg Call Call"
  },
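An equivalent standalone computation with `scipy.optimize.brentq`, using a cosine as a stand-in for the ODE interpolant `sol`:

```python
import numpy as np
from scipy.optimize import brentq

EPS = np.finfo(float).eps
sol = np.cos                    # stand-in for the dense-output interpolant sol(t)
event = lambda t, y: y          # event fires where the solution crosses zero

t_old, t = 1.0, 2.0             # bracketing step: cos changes sign at pi/2
root = brentq(lambda s: event(s, sol(s)), t_old, t, xtol=4 * EPS, rtol=4 * EPS)
print(root)                     # ~1.5707963 == pi/2
```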
  {
    "library": "pytorch",
    "name": "is_pinned",
    "source_code": "def is_pinned(self) -> bool:\n    return self._metadata.tensor_properties.pin_memory",
    "docstring": "Returns True if the sharded tensor (each local shard) resides in pinned memory.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py",
    "ast_data": "FunctionDef name:is_pinned arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "BooleanParser",
    "source_code": "class BooleanParser(jinja2.parser.Parser):\n\n    def parse_compare(self) -> jinja2.nodes.Expr:\n        node: jinja2.nodes.Expr\n        token = self.stream.current\n        if token.type == 'name':\n            if token.value in {'true', 'True'}:\n                node = jinja2.nodes.Const(True, lineno=token.lineno)\n            elif token.value in {'false', 'False'}:\n                node = jinja2.nodes.Const(False, lineno=token.lineno)\n            elif token.value in {'none', 'None'}:\n                node = jinja2.nodes.Const(None, lineno=token.lineno)\n            else:\n                node = jinja2.nodes.Name(token.value, 'load', lineno=token.lineno)\n            next(self.stream)\n        elif token.type == 'lparen':\n            next(self.stream)\n            node = self.parse_expression()\n            self.stream.expect('rparen')\n        else:\n            self.fail(f\"unexpected token '{token}'\", token.lineno)\n        return node",
    "docstring": "Only allow conditional expressions and binary operators.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\tags.py",
    "ast_data": "ClassDef name:BooleanParser FunctionDef name:parse_compare arg:self arguments arg Assign If Compare If Compare Assign Call If Compare Assign Call If Compare Assign Call Assign Call Call If Compare Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "sparse_mask",
    "source_code": "@tf_export('sparse.mask', v1=['sparse.mask', 'sparse_mask'])\n@deprecation.deprecated_endpoints('sparse_mask')\ndef sparse_mask(a, mask_indices, name=None):\n    with ops.name_scope(name, 'sparse_mask', [a, mask_indices]) as name:\n        indices = a.indices\n        out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)\n        out_values = gather(a.values, to_gather, name=name)\n        return indexed_slices.IndexedSlices(out_values, out_indices, a.dense_shape)",
    "docstring": "Masks elements of . Given an instance , returns another that contains a subset of the slices of . Only the slices at indices not specified in are returned. This is useful when you need to extract a subset of slices in an object. For example: Args: a: An instance. mask_indices: Indices of elements to mask. name: A name for the operation (optional). Returns: The masked instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:sparse_mask arg:a arg:mask_indices arg:name arguments arg arg arg With Call Assign Assign Call Assign Call Return return:yes Call Call Call"
  },
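A runnable version of the kind of example the docstring alludes to: keep two of four stored slices by masking the other two absolute indices:

```python
import tensorflow as tf

# `a` stores slices 12, 26, 37, 45 of a notional [1000, 10] tensor.
a = tf.IndexedSlices(values=tf.random.normal([4, 10]),
                     indices=tf.constant([12, 26, 37, 45], dtype=tf.int64),
                     dense_shape=tf.constant([1000, 10], dtype=tf.int64))

b = tf.sparse.mask(a, tf.constant([12, 45], dtype=tf.int64))
print(b.indices.numpy())  # [26 37]
print(b.values.shape)     # (2, 10)
```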
  {
    "library": "pandas",
    "name": "_node_not_implemented",
    "source_code": "def _node_not_implemented(node_name: str) -> Callable[..., None]:\n\n    def f(self, *args, **kwargs):\n        raise NotImplementedError(f\"'{node_name}' nodes are not implemented\")\n    return f",
    "docstring": "Return a function that raises a NotImplementedError with a passed node name.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\expr.py",
    "ast_data": "FunctionDef name:_node_not_implemented arg:node_name arguments arg FunctionDef name:f arg:self arguments arg arg arg Raise Call Return return:yes"
  },
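A self-contained sketch of the factory pattern (the `Visitor` class here is illustrative, not pandas' actual visitor):

```python
from typing import Callable

def _node_not_implemented(node_name: str) -> Callable[..., None]:
    def f(self, *args, **kwargs):
        raise NotImplementedError(f"'{node_name}' nodes are not implemented")
    return f

class Visitor:
    # Stamp out identical "unsupported" handlers for several AST node types.
    visit_Lambda = _node_not_implemented("Lambda")
    visit_Await = _node_not_implemented("Await")

Visitor().visit_Lambda()  # NotImplementedError: 'Lambda' nodes are not implemented
```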
  {
    "library": "django",
    "name": "get_text_list",
    "source_code": "@keep_lazy_text\ndef get_text_list(list_, last_word=gettext_lazy('or')):\n    if not list_:\n        return ''\n    if len(list_) == 1:\n        return str(list_[0])\n    return '%s %s %s' % (_(', ').join((str(i) for i in list_[:-1])), str(last_word), str(list_[-1]))",
    "docstring": ">>> get_text_list(['a', 'b', 'c', 'd']) 'a, b, c or d' >>> get_text_list(['a', 'b', 'c'], 'and') 'a, b and c' >>> get_text_list(['a', 'b'], 'and') 'a and b' >>> get_text_list(['a']) 'a' >>> get_text_list([]) ''",
    "type": "function",
    "file_path": "django\\django\\utils\\text.py",
    "ast_data": "FunctionDef name:get_text_list arg:list_ arg:last_word arguments arg arg Call If Return return:yes If Compare Call Return return:yes Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pygame",
    "name": "get_buffer",
    "source_code": "def get_buffer(self):\n    return self.dev.getbuffer()",
    "docstring": "Returns a string containing the raw pixel data.",
    "type": "method",
    "file_path": "pygame\\src_py\\_camera_vidcapture.py",
    "ast_data": "FunctionDef name:get_buffer arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_tick_label_size",
    "source_code": "def _get_tick_label_size(self, axis_name):\n    tick_kw = self._major_tick_kw\n    size = tick_kw.get('labelsize', mpl.rcParams[f'{axis_name}tick.labelsize'])\n    return mtext.FontProperties(size=size).get_size_in_points()",
    "docstring": "Return the text size of tick labels for this Axis. This is a convenience function to avoid having to create a in , since it is expensive.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_get_tick_label_size arg:self arg:axis_name arguments arg arg Assign Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensorarray_get_item",
    "source_code": "def _tf_tensorarray_get_item(target, i):\n    return target.read(i)",
    "docstring": "Overload of get_item that stages a TensorArray read.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\slices.py",
    "ast_data": "FunctionDef name:_tf_tensorarray_get_item arg:target arg:i arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "transformed",
    "source_code": "def transformed(self, transform):\n    return Path(transform.transform(self.vertices), self.codes, self._interpolation_steps)",
    "docstring": "Return a transformed copy of the path. See Also -------- matplotlib.transforms.TransformedPath A specialized path class that will cache the transformed result and automatically update when the transform changes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:transformed arg:self arg:transform arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_compare_prepare_convert_qconfig_mappings",
    "source_code": "def _compare_prepare_convert_qconfig_mappings(prepare_qconfig_mapping: QConfigMapping, convert_qconfig_mapping: QConfigMapping):\n    assert qconfig_equals(prepare_qconfig_mapping.global_qconfig, convert_qconfig_mapping.global_qconfig), 'Expected global qconfigs to be the same in the prepare and convert quantization configs'\n    prepare_dicts: list[OrderedDict] = [prepare_qconfig_mapping.object_type_qconfigs, prepare_qconfig_mapping.module_name_qconfigs, prepare_qconfig_mapping.module_name_regex_qconfigs]\n    convert_dicts: list[OrderedDict] = [convert_qconfig_mapping.object_type_qconfigs, convert_qconfig_mapping.module_name_qconfigs, convert_qconfig_mapping.module_name_regex_qconfigs]\n    dict_names = [_OBJECT_TYPE_DICT_KEY, _MODULE_NAME_DICT_KEY, _MODULE_NAME_REGEX_DICT_KEY]\n    for i in range(len(prepare_dicts)):\n        for name in prepare_dicts[i].keys():\n            assert name in convert_dicts[i], f'Missing key {dict_names[i]} {name} in convert QConfigMapping                 when it was present in prepare'\n            assert convert_dicts[i][name] is None or qconfig_equals(prepare_dicts[i][name], convert_dicts[i][name]), f'Expected convert QConfigMapping to have the same qconfig as prepare for key {dict_names[i]} {name};                 prepare: {prepare_dicts[i][name]}; convert: {convert_dicts[i][name]}'",
    "docstring": "Compare the qconfig_mapping passed in convert to the one from prepare and check the values Args: : configuration for prepare quantization step : configuration for convert quantization step",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\qconfig_mapping_utils.py",
    "ast_data": "FunctionDef name:_compare_prepare_convert_qconfig_mappings arg:prepare_qconfig_mapping arg:convert_qconfig_mapping arguments arg arg Call Assign For Call Call For Call Compare BoolOp Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    proba = self.predict_proba(X)\n    return np.log(proba)",
    "docstring": "Predict class log-probabilities for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to `classes_` does not support probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "reset",
    "source_code": "def reset(self) -> 'QMCEngine':\n    rng = copy.deepcopy(self.rng_seed)\n    self.rng = check_random_state(rng)\n    self.num_generated = 0\n    return self",
    "docstring": "Reset the engine to base state. Returns ------- engine : QMCEngine Engine reset to its base state.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:reset arg:self arguments arg Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "index",
    "source_code": "@cherrypy.expose\ndef index(self):\n    return '\\n            <p>Here are some useful links:</p>\\n\\n            <ul>\\n                <li>\\n                    <a href=\"http://www.cherrypy.dev\">The CherryPy Homepage</a>\\n                </li>\\n                <li>\\n                    <a href=\"http://www.python.org\">The Python Homepage</a>\\n                </li>\\n            </ul>\\n\\n            <p>You can check out some extra useful\\n            links <a href=\"./extra/\">here</a>.</p>\\n\\n            <p>[<a href=\"../\">Return</a>]</p>\\n        '",
    "docstring": "Produce HTTP response body of links page app index URI.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\tutorial\\tut04_complex_site.py",
    "ast_data": "FunctionDef name:index arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "nms3d",
    "source_code": "def nms3d(input: Tensor, kernel_size: tuple[int, int, int], mask_only: bool=False) -> Tensor:\n    return NonMaximaSuppression3d(kernel_size)(input, mask_only)",
    "docstring": "Apply non maxima suppression to filter. See :class: for details.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\subpix\\nms.py",
    "ast_data": "FunctionDef name:nms3d arg:input arg:kernel_size arg:mask_only arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "is_default",
    "source_code": "def is_default(method):\n    return getattr(method, '_is_default', False)",
    "docstring": "Check if a method is decorated with the wrapper.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py",
    "ast_data": "FunctionDef name:is_default arg:method arguments arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "handle_fk_field",
    "source_code": "def handle_fk_field(self, obj, field):\n    self._start_relational_field(field)\n    related_att = getattr(obj, field.attname)\n    if related_att is not None:\n        if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):\n            related = getattr(obj, field.name)\n            related = related.natural_key()\n            for key_value in related:\n                self.xml.startElement('natural', {})\n                self.xml.characters(str(key_value))\n                self.xml.endElement('natural')\n        else:\n            self.xml.characters(str(related_att))\n    else:\n        self.xml.addQuickElement('None')\n    self.xml.endElement('field')",
    "docstring": "Handle a ForeignKey (they need to be treated slightly differently from regular fields).",
    "type": "method",
    "file_path": "django\\django\\core\\serializers\\xml_serializer.py",
    "ast_data": "FunctionDef name:handle_fk_field arg:self arg:obj arg:field arguments arg arg arg Call Assign Call If Compare If BoolOp Call Assign Call Assign Call For Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, soft_quantized_output: torch.Tensor, original_output: torch.Tensor, V: torch.Tensor, curr_iter: int) -> tuple[torch.Tensor, torch.Tensor]:\n    regularization_term = self.rounding_regularization(V, curr_iter)\n    reconstruction_term = self.reconstruction_loss(soft_quantized_output, original_output)\n    return (regularization_term, reconstruction_term)",
    "docstring": "Compute the asymmetric reconstruction formulation as eq [25]",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\experimental\\adaround_loss.py",
    "ast_data": "FunctionDef name:forward arg:self arg:soft_quantized_output arg:original_output arg:V arg:curr_iter arguments arg arg arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "validate_categories",
    "source_code": "@staticmethod\ndef validate_categories(categories, fastpath: bool=False) -> Index:\n    from pandas.core.indexes.base import Index\n    if not fastpath and (not is_list_like(categories)):\n        raise TypeError(f\"Parameter 'categories' must be list-like, was {categories!r}\")\n    if not isinstance(categories, ABCIndex):\n        categories = Index._with_infer(categories, tupleize_cols=False)\n    if not fastpath:\n        if categories.hasnans:\n            raise ValueError('Categorical categories cannot be null')\n        if not categories.is_unique:\n            raise ValueError('Categorical categories must be unique')\n    if isinstance(categories, ABCCategoricalIndex):\n        categories = categories.categories\n    return categories",
    "docstring": "Validates that we have good categories Parameters ---------- categories : array-like fastpath : bool Whether to skip nan and uniqueness checks Returns ------- categories : Index",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:validate_categories arg:categories arg:fastpath arguments arg arg If BoolOp Call Raise Call If Call Assign Call If If Raise Call If Raise Call If Call Assign Return return:yes"
  },
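The same invariants surface through the public constructor, which routes through this validator; a quick check:

```python
import pandas as pd

print(pd.CategoricalDtype(['a', 'b']))   # fine: list-like, unique, non-null
try:
    pd.CategoricalDtype(['a', 'a'])      # duplicate categories are rejected
except ValueError as e:
    print(e)                             # Categorical categories must be unique
```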
  {
    "library": "kornia",
    "name": "top_right",
    "source_code": "@property\ndef top_right(self) -> torch.Tensor:\n    out = self.top_left\n    out[..., 0] += self.width\n    return out",
    "docstring": "The [x y] position of the top-left coordinate of the bounding box.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:top_right arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "patch_dynamo_config",
    "source_code": "def patch_dynamo_config(arg1: Optional[Union[str, dict[str, Any], tuple[tuple[str, Any], ...]]]=None, arg2: Any=None, **kwargs: Any) -> DynamoConfigPatchProxy:\n    if isinstance(arg1, tuple):\n        arg1 = dict(arg1)\n    config_patch = torch._dynamo.config.patch(arg1, arg2, **kwargs)\n    _patch_dynamo_config_check(config_patch.changes)\n    return DynamoConfigPatchProxy(config_patch)",
    "docstring": "A wrapper around torch._dynamo.config.patch that can be traced by Dynamo to temporarily change config values DURING tracing. See _allowed_config_patches for the list of allowed config patches. Arguments are the same as with torch._dynamo.confing.patch. Can be used as a decorator or a context manager. User code SHOULD NOT MODIFY the return value of this function. WARNING: changing Dynamo config during tracing can lead to unpredictable tracing behavior! Proceed only as advised!",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\decorators.py",
    "ast_data": "FunctionDef name:patch_dynamo_config arg:arg1 arg:arg2 arguments arg arg arg If Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> X448PublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "matplotlib",
    "name": "set_verts",
    "source_code": "def set_verts(self, verts, closed=True):\n    self.stale = True\n    if isinstance(verts, np.ma.MaskedArray):\n        verts = verts.astype(float).filled(np.nan)\n    if not closed:\n        self._paths = [mpath.Path(xy) for xy in verts]\n        return\n    if isinstance(verts, np.ndarray) and len(verts.shape) == 3 and verts.size:\n        verts_pad = np.concatenate((verts, verts[:, :1]), axis=1)\n        template_path = mpath.Path(verts_pad[0], closed=True)\n        codes = template_path.codes\n        _make_path = mpath.Path._fast_from_codes_and_verts\n        self._paths = [_make_path(xy, codes, internals_from=template_path) for xy in verts_pad]\n        return\n    self._paths = []\n    for xy in verts:\n        if len(xy):\n            self._paths.append(mpath.Path._create_closed(xy))\n        else:\n            self._paths.append(mpath.Path(xy))",
    "docstring": "Set the vertices of the polygons. Parameters ---------- verts : list of array-like The sequence of polygons [*verts0*, *verts1*, ...] where each element *verts_i* defines the vertices of polygon *i* as a 2D array-like of shape (M, 2). closed : bool, default: True Whether the polygon should be closed by adding a CLOSEPOLY connection at the end.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_verts arg:self arg:verts arg:closed arguments arg arg arg Assign If Call Assign Call Call If Assign Call Return return:no If BoolOp Call Compare Call Assign Call Assign Call Assign Assign Assign Call Return return:no Assign For If Call Call Call Call Call"
  },
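A short example of the 3-D ndarray fast path described above (an (N, M, 2) array of polygons closed via the CLOSEPOLY link):

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection

# Two triangles as an (N=2, M=3, 2) array; closed=True appends the CLOSEPOLY link.
verts = np.array([[[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]],
                  [[2.0, 0.0], [3.0, 0.0], [2.5, 1.0]]])
col = PolyCollection([])
col.set_verts(verts, closed=True)

fig, ax = plt.subplots()
ax.add_collection(col)
ax.set(xlim=(-0.5, 3.5), ylim=(-0.5, 1.5))
plt.show()
```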
  {
    "library": "scikit-learn",
    "name": "is_scalar_nan",
    "source_code": "def is_scalar_nan(x):\n    return not isinstance(x, numbers.Integral) and isinstance(x, numbers.Real) and math.isnan(x)",
    "docstring": "Test if x is NaN. This function is meant to overcome the issue that np.isnan does not allow non-numerical types as input, and that np.nan is not float('nan'). Parameters ---------- x : any type Any scalar value. Returns ------- bool Returns true if x is NaN, and false otherwise. Examples -------- >>> import numpy as np >>> from sklearn.utils._missing import is_scalar_nan >>> is_scalar_nan(np.nan) True >>> is_scalar_nan(float(\"nan\")) True >>> is_scalar_nan(None) False >>> is_scalar_nan(\"\") False >>> is_scalar_nan([np.nan]) False",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_missing.py",
    "ast_data": "FunctionDef name:is_scalar_nan arg:x arguments arg Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_init_tokens_op",
    "source_code": "def get_init_tokens_op(self, num_tokens=-1):\n    if self._gradients_applied is False:\n        raise ValueError('get_init_tokens_op() should be called after apply_gradients().')\n    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas\n    if num_tokens == -1:\n        num_tokens = self._replicas_to_aggregate\n    elif num_tokens < tokens_needed:\n        raise ValueError('Too few tokens to finish the first step: %d (given) vs %d (needed)' % (num_tokens, tokens_needed))\n    if num_tokens > 0:\n        with ops.device(self._global_step.device), ops.name_scope(''):\n            tokens = array_ops.fill([num_tokens], self._global_step)\n            init_tokens = self._sync_token_queue.enqueue_many((tokens,))\n    else:\n        init_tokens = control_flow_ops.no_op(name='no_init_tokens')\n    return init_tokens",
    "docstring": "Returns the op to fill the sync_token_queue with the tokens. This is supposed to be executed in the beginning of the chief/sync thread so that even if the total_num_replicas is less than replicas_to_aggregate, the model can still proceed as the replicas can compute multiple steps per variable update. Make sure: . Args: num_tokens: Number of tokens to add to the queue. Returns: An op for the chief/sync replica to fill the token queue. Raises: ValueError: If this is called before apply_gradients(). ValueError: If num_tokens are smaller than replicas_to_aggregate - total_num_replicas.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py",
    "ast_data": "FunctionDef name:get_init_tokens_op arg:self arg:num_tokens arguments arg arg If Compare Raise Call Assign If Compare Assign If Compare Raise Call If Compare With Call Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "draft_export",
    "source_code": "def draft_export(mod: torch.nn.Module, args: tuple[Any, ...], kwargs: Optional[dict[str, Any]]=None, *, dynamic_shapes: Optional[Union[dict[str, Any], tuple[Any], list[Any]]]=None, preserve_module_call_signature: tuple[str, ...]=(), strict: bool=False) -> ExportedProgram:\n    from ._draft_export import draft_export\n    return draft_export(mod=mod, args=args, kwargs=kwargs, dynamic_shapes=dynamic_shapes, preserve_module_call_signature=preserve_module_call_signature, strict=strict)",
    "docstring": "A version of torch.export.export which is designed to consistently produce an ExportedProgram, even if there are potential soundness issues, and to generate a report listing the issues found.",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\__init__.py",
    "ast_data": "FunctionDef name:draft_export arg:mod arg:args arg:kwargs arguments arg arg arg arg arg arg Return return:yes Call"
  },
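A sketch of calling `draft_export`, assuming a PyTorch build recent enough to ship `torch.export.draft_export` (consistent with the file_path above); the toy module is hypothetical.

```python
import torch
from torch.export import draft_export

class Toy(torch.nn.Module):  # hypothetical example module
    def forward(self, x):
        return x.sin() + 1.0

# Unlike torch.export.export, draft_export is designed to always produce
# an ExportedProgram and to report any potential soundness issues found.
ep = draft_export(Toy(), (torch.randn(3),))
print(ep)
```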
  {
    "library": "scikit-learn",
    "name": "fit_predict",
    "source_code": "def fit_predict(self, X, y=None):\n    return super().fit_predict(X, y)",
    "docstring": "Fit clustering from features/affinity matrix; return cluster labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples) Training instances to cluster, or similarities / affinities between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_affinity_propagation.py",
    "ast_data": "FunctionDef name:fit_predict arg:self arg:X arg:y arguments arg arg arg Return return:yes Call Call"
  },
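Since this `fit_predict` simply defers to the base implementation, typical use goes through the public estimator; a small sketch with toy data:

```python
import numpy as np
from sklearn.cluster import AffinityPropagation

X = np.array([[1, 2], [1, 4], [1, 0],
              [4, 2], [4, 4], [4, 0]])
# Cluster the samples and get one label per row of X.
labels = AffinityPropagation(random_state=0).fit_predict(X)
print(labels)  # two clusters, e.g. [0 0 0 1 1 1]
```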
  {
    "library": "pandas",
    "name": "_validate",
    "source_code": "@classmethod\ndef _validate(cls, left, right, dtype: IntervalDtype) -> None:\n    if not isinstance(dtype, IntervalDtype):\n        msg = f'invalid dtype: {dtype}'\n        raise ValueError(msg)\n    if len(left) != len(right):\n        msg = 'left and right must have the same length'\n        raise ValueError(msg)\n    left_mask = notna(left)\n    right_mask = notna(right)\n    if not (left_mask == right_mask).all():\n        msg = 'missing values must be missing in the same location both left and right sides'\n        raise ValueError(msg)\n    if not (left[left_mask] <= right[left_mask]).all():\n        msg = 'left side of interval must be <= right side'\n        raise ValueError(msg)",
    "docstring": "Verify that the IntervalArray is valid. Checks that * dtype is correct * left and right match lengths * left and right have the same missing values * left is always below right",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\interval.py",
    "ast_data": "FunctionDef name:_validate arg:cls arg:left arg:right arg:dtype arguments arg arg arg arg If Call Assign Raise Call If Compare Call Call Assign Raise Call Assign Call Assign Call If Call Compare Assign Raise Call If Call Compare Assign Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_append_qdq",
    "source_code": "def _append_qdq(x, is_per_channel, is_bias, kwargs):\n    per_channel_axis = 0\n    scale_key = 'bias_scale' if is_bias else 'weight_scale'\n    zp_key = 'bias_zero_point' if is_bias else 'weight_zero_point'\n    scale = kwargs[scale_key] if is_per_channel else 1.0\n    zp = kwargs[zp_key] if is_per_channel else 0\n    qmin = -127\n    qmax = 127\n    dtype = torch.int8\n    qd = torch.ops.quantized_decomposed\n    if is_per_channel:\n        x = qd.quantize_per_channel(x, scale, zp, per_channel_axis, qmin, qmax, dtype)\n        x = qd.dequantize_per_channel(x, scale, zp, per_channel_axis, qmin, qmax, dtype)\n    else:\n        x = qd.quantize_per_tensor(x, scale, zp, qmin, qmax, dtype)\n        x = qd.dequantize_per_tensor(x, scale, zp, qmin, qmax, dtype)\n    return x",
    "docstring": "Helper function to append q-dq ops after , using dummy values for the qparams and qmin/qmax. We use dummy values here because we match with and will manually replace these values after subgraph rewriting. Return the dq node.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_append_qdq arg:x arg:is_per_channel arg:is_bias arg:kwargs arguments arg arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign If Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "bgr_to_rgb",
    "source_code": "def bgr_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}')\n    out: Tensor = image.flip(-3)\n    return out",
    "docstring": "Convert a BGR image to RGB. Args: image: BGR Image to be converted to BGR of shape :math:. Returns: RGB version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = bgr_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "FunctionDef name:bgr_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "count_params",
    "source_code": "def count_params(self):\n    if not self.built:\n        if getattr(self, '_is_graph_network', False):\n            with tf_utils.maybe_init_scope(self):\n                self._maybe_build(self.inputs)\n        else:\n            raise ValueError('You tried to call `count_params` on ' + self.name + \", but the layer isn't built. You can build it manually via: `\" + self.name + '.build(batch_input_shape)`.')\n    return layer_utils.count_params(self.weights)",
    "docstring": "Count the total number of scalars composing the weights. Returns: An integer count. Raises: ValueError: if the layer isn't yet built (in which case its weights aren't yet defined).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:count_params arg:self arguments arg If If Call With Call Call Raise Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_generate_sparse_data",
    "source_code": "def _generate_sparse_data(X_csr):\n    assert X_csr.format == 'csr'\n    yield ('csr', X_csr.copy())\n    for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']:\n        yield (sparse_format, X_csr.asformat(sparse_format))\n    X_coo = X_csr.asformat('coo')\n    X_coo.row = X_coo.row.astype('int64')\n    X_coo.col = X_coo.col.astype('int64')\n    yield ('coo_64', X_coo)\n    for sparse_format in ['csc', 'csr']:\n        X = X_csr.asformat(sparse_format)\n        X.indices = X.indices.astype('int64')\n        X.indptr = X.indptr.astype('int64')\n        yield (sparse_format + '_64', X)",
    "docstring": "Generate sparse matrices or arrays with {32,64}bit indices of diverse format. Parameters ---------- X_csr: scipy.sparse.csr_matrix or scipy.sparse.csr_array Input in CSR format. Returns ------- out: iter(Matrices) or iter(Arrays) In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo', 'coo_64', 'csc_64', 'csr_64']",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py",
    "ast_data": "FunctionDef name:_generate_sparse_data arg:X_csr arguments arg Compare Call For Call Assign Call Assign Call Assign Call For Assign Call Assign Call Assign Call"
  },
  {
    "library": "cryptography",
    "name": "nonce",
    "source_code": "@property\n@abc.abstractmethod\ndef nonce(self) -> utils.Buffer:\n    pass",
    "docstring": "The value of the nonce for this mode as bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\modes.py",
    "ast_data": "FunctionDef name:nonce arg:self arguments arg"
  },
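The `nonce` property is implemented by concrete modes such as CTR; a short sketch (key and nonce generated ad hoc for illustration):

```python
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

nonce = os.urandom(16)
mode = modes.CTR(nonce)
assert mode.nonce == nonce  # the abstract property defined above

# The mode plugs into a Cipher as usual; CTR is a stream mode.
encryptor = Cipher(algorithms.AES(os.urandom(32)), mode).encryptor()
ct = encryptor.update(b"hello") + encryptor.finalize()
```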
  {
    "library": "tensorflow",
    "name": "targets",
    "source_code": "@property\ndef targets(self) -> Iterable[function_type.FunctionType]:\n    return self._dispatch_table.keys()",
    "docstring": "Returns an iterable to all targets in the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\type_dispatch.py",
    "ast_data": "FunctionDef name:targets arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_fit",
    "source_code": "def _fit(self, X, y=None, routed_params=None, raw_params=None):\n    self.steps = list(self.steps)\n    self._validate_steps()\n    memory = check_memory(self.memory)\n    fit_transform_one_cached = memory.cache(_fit_transform_one)\n    for step_idx, name, transformer in self._iter(with_final=False, filter_passthrough=False):\n        if transformer is None or transformer == 'passthrough':\n            with _print_elapsed_time('Pipeline', self._log_message(step_idx)):\n                continue\n        if hasattr(memory, 'location') and memory.location is None:\n            cloned_transformer = transformer\n        else:\n            cloned_transformer = clone(transformer)\n        step_params = self._get_metadata_for_step(step_idx=step_idx, step_params=routed_params[name], all_params=raw_params)\n        X, fitted_transformer = fit_transform_one_cached(cloned_transformer, X, y, weight=None, message_clsname='Pipeline', message=self._log_message(step_idx), params=step_params)\n        self.steps[step_idx] = (name, fitted_transformer)\n    return X",
    "docstring": "Fit the pipeline except the last step. routed_params is the output of raw_params is the parameters passed by the user, used when is set by the user, to transform metadata using a sub-pipeline.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:_fit arg:self arg:X arg:y arg:routed_params arg:raw_params arguments arg arg arg arg arg Assign Call Call Assign Call Assign Call For Call If BoolOp Compare Compare With Call Call If BoolOp Call Compare Assign Assign Call Assign Call Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_NoneCodec",
    "source_code": "class _NoneCodec:\n\n    def can_encode(self, pyobj):\n        return pyobj is None\n\n    def do_encode(self, none_value, encode_fn):\n        del encode_fn, none_value\n        value = struct_pb2.StructuredValue()\n        value.none_value.CopyFrom(struct_pb2.NoneValue())\n        return value\n\n    def can_decode(self, value):\n        return value.HasField('none_value')\n\n    def do_decode(self, value, decode_fn):\n        del decode_fn, value\n        return None",
    "docstring": "Codec for None.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:_NoneCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Compare FunctionDef name:do_encode arg:self arg:none_value arg:encode_fn arguments arg arg arg Assign Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "dest_nrows",
    "source_code": "def dest_nrows(self):\n    return math_ops.cast(array_ops.shape(self.gather_index)[0], dtype=self.dtype)",
    "docstring": "Return the number of rows in the resulting gather, or None if tiling.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:dest_nrows arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_reduce_scatter_divide_factor",
    "source_code": "def set_reduce_scatter_divide_factor(self, factor: float) -> None:\n    state = self._get_fsdp_state()\n    if (fsdp_param_group := state._fsdp_param_group) is not None:\n        mul_factor = 1.0 / float(factor)\n        reduce_op = torch.distributed._make_nccl_premul_sum(mul_factor)\n        fsdp_param_group.reduce_scatter_reduce_op = reduce_op",
    "docstring": "Sets a custom divide factor for the reduce-scatter. This becomes a custom reduce op using NCCL's PreMulSum, which allows multiplying by the factor before reduction. Args: factor (float): Custom divide factor.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_reduce_scatter_divide_factor arg:self arg:factor arguments arg arg Assign Call If Compare Assign Call Assign Call Assign"
  },
  {
    "library": "kornia",
    "name": "to_axis_angle",
    "source_code": "def to_axis_angle(self) -> Tensor:\n    return quaternion_to_axis_angle(self.data)",
    "docstring": "Convert the quaternion to an axis-angle representation. Example: >>> q = Quaternion.identity() >>> axis_angle = q.to_axis_angle() >>> axis_angle tensor([0., 0., 0.], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:to_axis_angle arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__call__",
    "source_code": "def __call__(self, estimator, *args, **kwargs):\n    return estimator.score(*args, **kwargs)",
    "docstring": "Method that wraps estimator.score",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\metrics\\_scorer.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:estimator arguments arg arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_has_custom_op",
    "source_code": "def _has_custom_op(sharding_spec, op):\n    class_name = type(sharding_spec).__qualname__\n    return class_name in _CUSTOM_SHARDING_SPEC_OPS and op in _CUSTOM_SHARDING_SPEC_OPS[class_name]",
    "docstring": "Returns whether or not the ShardingSpec has a custom op implementation.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py",
    "ast_data": "FunctionDef name:_has_custom_op arg:sharding_spec arg:op arguments arg arg Assign Call Return return:yes BoolOp Compare Compare"
  },
  {
    "library": "scipy",
    "name": "random_base2",
    "source_code": "def random_base2(self, m: IntNumber) -> np.ndarray:\n    n = 2 ** m\n    total_n = self.num_generated + n\n    if not total_n & total_n - 1 == 0:\n        raise ValueError(f\"The balance properties of Sobol' points require n to be a power of 2. {self.num_generated} points have been previously generated, then: n={self.num_generated}+2**{m}={total_n}. If you still want to do this, the function 'Sobol.random()' can be used.\")\n    return self.random(n)",
    "docstring": "Draw point(s) from the Sobol' sequence. This function draws :math: points in the parameter space ensuring the balance properties of the sequence. Parameters ---------- m : int Logarithm in base 2 of the number of samples; i.e., n = 2^m. Returns ------- sample : array_like (n, d) Sobol' sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:random_base2 arg:self arg:m arguments arg arg Assign Assign If Compare Raise Call Return return:yes Call"
  },
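A usage sketch via the public `scipy.stats.qmc.Sobol` class, illustrating the power-of-two constraint enforced in the source above:

```python
from scipy.stats import qmc

sampler = qmc.Sobol(d=2, scramble=False)
sample = sampler.random_base2(m=3)  # 2**3 = 8 points in [0, 1)^2
print(sample.shape)                 # (8, 2)

# A further draw must keep the running total a power of two;
# 8 + 2**3 = 16 is fine, while random_base2(m=2) here would raise.
more = sampler.random_base2(m=3)
```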
  {
    "library": "tensorflow",
    "name": "_set_output_attrs",
    "source_code": "@trackable.no_automatic_dependency_tracking\ndef _set_output_attrs(self, outputs):\n    outputs = nest.flatten(outputs)\n    self.outputs = outputs\n    self.output_names = training_utils_v1.generic_output_names(outputs)\n    self.built = True",
    "docstring": "Sets attributes related to the outputs of the Model.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_set_output_attrs arg:self arg:outputs arguments arg arg Assign Call Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "_unable_to_call_layer_due_to_serialization_issue",
    "source_code": "def _unable_to_call_layer_due_to_serialization_issue(layer, *unused_args, **unused_kwargs):\n    raise ValueError('Cannot call custom layer {} of type {}, because the call function was not serialized to the SavedModel.Please try one of the following methods to fix this issue:\\n\\n(1) Implement `get_config` and `from_config` in the layer/model class, and pass the object to the `custom_objects` argument when loading the model. For more details, see: https://www.tensorflow.org/guide/keras/save_and_serialize\\n\\n(2) Ensure that the subclassed model or layer overwrites `call` and not `__call__`. The input shape and dtype will be automatically recorded when the object is called, and used when saving. To manually specify the input shape/dtype, decorate the call function with `@tf.function(input_signature=...)`.'.format(layer.name, type(layer)))",
    "docstring": "Replaces the if the layer was not fully serialized. Keras Model/Layer serialization is relatively relaxed because SavedModels are not always loaded back as keras models. Thus, when there is an issue tracing a non-signature function, a warning is logged instead of raising an error. This results in a SavedModel where the model's call function is saved, but the internal layer call functions are not. When deserialized with , the internal layers which do not have serialized call functions should raise an error when called. Args: layer: Layer without the serialized call function. Raises: ValueError",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:_unable_to_call_layer_due_to_serialization_issue arg:layer arguments arg arg arg Raise Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "enable_param_learning",
    "source_code": "@torch.jit.export\ndef enable_param_learning(self):\n    self.toggle_qparam_learning(enabled=True).toggle_fake_quant(enabled=True).toggle_observer_update(enabled=False)\n    return self",
    "docstring": "Enable parameter learning over static observer estimates. Enables learning of quantization parameters and disables static observer estimates. Forward path returns fake quantized X.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\_learnable_fake_quantize.py",
    "ast_data": "FunctionDef name:enable_param_learning arg:self arguments arg Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "AdjustLog",
    "source_code": "class AdjustLog(Module):\n\n    def __init__(self, gain: float=1, inv: bool=False, clip_output: bool=True) -> None:\n        super().__init__()\n        self.gain: float = gain\n        self.inv: bool = inv\n        self.clip_output: bool = clip_output\n\n    def forward(self, image: Tensor) -> Tensor:\n        return adjust_log(image, gain=self.gain, inv=self.inv, clip_output=self.clip_output)",
    "docstring": "Adjust log correction on the input image tensor. The input image is expected to be in the range of [0, 1]. Reference: [1]: Args: image: Image to be adjusted in the shape of :math:. gain: The multiplier of logarithmic function. inv: If is set to True the function will return the inverse logarithmic correction. clip_output: Whether to clip the output image with range of [0, 1]. Example: >>> x = torch.zeros(1, 1, 2, 2) >>> AdjustLog(inv=True)(x) tensor([[[[0., 0.], [0., 0.]]]])",
    "type": "class",
    "file_path": "kornia\\kornia\\enhance\\adjust.py",
    "ast_data": "ClassDef name:AdjustLog FunctionDef name:__init__ arg:self arg:gain arg:inv arg:clip_output arguments arg arg arg arg Call Call FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "LazyRfc3339UtcTime",
    "source_code": "class LazyRfc3339UtcTime(object):\n\n    def __str__(self):\n        iso_formatted_now = datetime.datetime.now(datetime.timezone.utc).isoformat('T')\n        return f'{iso_formatted_now!s}Z'",
    "docstring": "A postponed timestamp string retrieval class.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "ClassDef name:LazyRfc3339UtcTime FunctionDef name:__str__ arg:self arguments arg Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_get_related_models",
    "source_code": "def _get_related_models(m):\n    related_models = [subclass for subclass in m.__subclasses__() if issubclass(subclass, models.Model)]\n    related_fields_models = set()\n    for f in m._meta.get_fields(include_parents=True, include_hidden=True):\n        if f.is_relation and f.related_model is not None and (not isinstance(f.related_model, str)):\n            related_fields_models.add(f.model)\n            related_models.append(f.related_model)\n    opts = m._meta\n    if opts.proxy and m in related_fields_models:\n        related_models.append(opts.concrete_model)\n    return related_models",
    "docstring": "Return all models that have a direct relationship to the given model.",
    "type": "function",
    "file_path": "django\\django\\db\\migrations\\state.py",
    "ast_data": "FunctionDef name:_get_related_models arg:m arguments arg Assign Call Call Assign Call For Call If BoolOp Compare Call Call Call Assign If BoolOp Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_is_fitted",
    "source_code": "def _is_fitted(estimator, attributes=None, all_or_any=all):\n    if attributes is not None:\n        if not isinstance(attributes, (list, tuple)):\n            attributes = [attributes]\n        return all_or_any([hasattr(estimator, attr) for attr in attributes])\n    if hasattr(estimator, '__sklearn_is_fitted__'):\n        return estimator.__sklearn_is_fitted__()\n    fitted_attrs = [v for v in vars(estimator) if v.endswith('_') and (not v.startswith('__'))]\n    return len(fitted_attrs) > 0",
    "docstring": "Determine if an estimator is fitted Parameters ---------- estimator : estimator instance Estimator instance for which the check is performed. attributes : str, list or tuple of str, default=None Attribute name(s) given as string or a list/tuple of strings Eg.: `Noneestimator` is considered fitted if there exist an attribute that ends with a underscore and does not start with double underscore. all_or_any : callable, {all, any}, default=all Specify whether all or any of the given attributes must exist. Returns ------- fitted : bool Whether the estimator is fitted.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_is_fitted arg:estimator arg:attributes arg:all_or_any arguments arg arg arg If Compare If Call Assign Return return:yes Call Call If Call Return return:yes Call Assign Call BoolOp Call Call Return return:yes Compare Call"
  },
  {
    "library": "scipy",
    "name": "scipy_namespace_for",
    "source_code": "def scipy_namespace_for(xp: ModuleType) -> ModuleType | None:\n    if is_cupy(xp):\n        import cupyx\n        return cupyx.scipy\n    if is_jax(xp):\n        import jax\n        return jax.scipy\n    if is_torch(xp):\n        return xp\n    return None",
    "docstring": "Return the -like namespace of a non-NumPy backend That is, return the namespace corresponding with backend that contains sub-namespaces like and . If no such namespace exists, return ``. Useful for dispatching.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:scipy_namespace_for arg:xp arguments arg If Call Return return:yes If Call Return return:yes If Call Return return:yes Return return:no"
  },
  {
    "library": "django",
    "name": "cache_page",
    "source_code": "def cache_page(timeout, *, cache=None, key_prefix=None):\n    return decorator_from_middleware_with_args(CacheMiddleware)(page_timeout=timeout, cache_alias=cache, key_prefix=key_prefix)",
    "docstring": "Decorator for views that tries getting the page from the cache and populates the cache if the page isn't in the cache yet. The cache is keyed by the URL and some data from the headers. Additionally there is the key prefix that is used to distinguish different cache areas in a multi-site setup. You could use the get_current_site().domain, for example, as that is unique across a Django project. Additionally, all headers from the response's Vary header will be taken into account on caching -- just like the middleware does.",
    "type": "function",
    "file_path": "django\\django\\views\\decorators\\cache.py",
    "ast_data": "FunctionDef name:cache_page arg:timeout arguments arg arg arg Return return:yes Call Call"
  },
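Typical decorator usage (the view name and key prefix are illustrative):

```python
from django.views.decorators.cache import cache_page

@cache_page(60 * 15, cache="default", key_prefix="site1")
def article_list(request):  # hypothetical view
    ...

# It can also wrap a view inline in a URLconf:
# path("articles/", cache_page(60 * 15)(article_list))
```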
  {
    "library": "django",
    "name": "crosses",
    "source_code": "def crosses(self, other):\n    return self._topology(capi.ogr_crosses, other)",
    "docstring": "Return True if this geometry crosses the other.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:crosses arg:self arg:other arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "Identity",
    "source_code": "class Identity(Initializer):\n\n    def __init__(self, gain=1.0):\n        self.gain = gain\n\n    def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n        self._validate_kwargs(kwargs, support_partition=False)\n        dtype = _assert_float_dtype(dtype)\n        if len(shape) != 2:\n            raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')\n        initializer = linalg_ops_impl.eye(*shape, dtype=dtype)\n        return self.gain * initializer\n\n    def get_config(self):\n        return {'gain': self.gain}",
    "docstring": "Initializer that generates the identity matrix. Initializers allow you to pre-specify an initialization strategy, encoded in the Initializer object, without knowing the shape and dtype of the variable being initialized. Only usable for generating 2D matrices. Examples: >>> def make_variable(k, initializer): ... return tf.Variable(initializer(shape=[k, k], dtype=tf.float32)) >>> make_variable(2, tf.initializers.Identity()) >>> make_variable(3, tf.initializers.Identity(gain=0.5)) Args: gain: Multiplicative factor to apply to the identity matrix.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "ClassDef name:Identity FunctionDef name:__init__ arg:self arg:gain arguments arg arg Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If Compare Call Raise Call Assign Call Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "entropy",
    "source_code": "@_axis_nan_policy_factory(lambda x: x, n_samples=lambda kwgs: 2 if 'qk' in kwgs and kwgs['qk'] is not None else 1, n_outputs=1, result_to_tuple=lambda x, _: (x,), paired=True, too_small=-1)\ndef entropy(pk: np.typing.ArrayLike, qk: np.typing.ArrayLike | None=None, base: float | None=None, axis: int=0) -> np.number | np.ndarray:\n    if base is not None and base <= 0:\n        raise ValueError('`base` must be a positive number or `None`.')\n    xp = array_namespace(pk, qk)\n    pk, qk = xp_promote(pk, qk, broadcast=True, xp=xp)\n    with np.errstate(invalid='ignore'):\n        if qk is not None:\n            pk, qk = _share_masks(pk, qk, xp=xp)\n            qk = qk / xp.sum(qk, axis=axis, keepdims=True)\n        pk = pk / xp.sum(pk, axis=axis, keepdims=True)\n    if qk is None:\n        vec = special.entr(pk)\n    elif is_marray(xp):\n        vec = special.rel_entr(pk.data, qk.data)\n        vec = xp.asarray(vec, mask=pk.mask)\n    else:\n        vec = special.rel_entr(pk, qk)\n    S = xp.sum(vec, axis=axis)\n    if base is not None:\n        S /= math.log(base)\n    return S",
    "docstring": "Calculate the Shannon entropy/relative entropy of given distribution(s). If only probabilities are given, the Shannon entropy is calculated as `qkpkqkpkpkqkpkqkpkqkpkentropy: >>> CE = entropy(pk, base=base) + entropy(pk, qk, base=base) >>> CE 1.736965594166206 >>> CE == -np.sum(pk * np.log(qk)) / np.log(base) True",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_entropy.py",
    "ast_data": "FunctionDef name:entropy arg:pk arg:qk arg:base arg:axis arguments arg arg arg arg If BoolOp Compare Compare Raise Call Assign Call Assign Call With Call If Compare Assign Call Assign Call Assign Call If Compare Assign Call If Call Assign Call Assign Call Assign Call Assign Call If Compare Call Return return:yes Call arguments arg arguments arg BoolOp Compare Compare arguments arg arg"
  },
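A short sketch of both modes of `scipy.stats.entropy` (values chosen for illustration):

```python
import numpy as np
from scipy.stats import entropy

pk = np.array([0.5, 0.5])
print(entropy(pk, base=2))  # 1.0 -- one fair coin flip of uncertainty

qk = np.array([0.9, 0.1])
print(entropy(pk, qk))      # relative entropy D(pk || qk), in nats
```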
  {
    "library": "matplotlib",
    "name": "_check_xy",
    "source_code": "def _check_xy(self, renderer):\n    b = self.get_annotation_clip()\n    if b or (b is None and self.coords1 == 'data'):\n        xy_pixel = self._get_xy(self.xy1, self.coords1, self.axesA)\n        if self.axesA is None:\n            axes = self.axes\n        else:\n            axes = self.axesA\n        if not axes.contains_point(xy_pixel):\n            return False\n    if b or (b is None and self.coords2 == 'data'):\n        xy_pixel = self._get_xy(self.xy2, self.coords2, self.axesB)\n        if self.axesB is None:\n            axes = self.axes\n        else:\n            axes = self.axesB\n        if not axes.contains_point(xy_pixel):\n            return False\n    return True",
    "docstring": "Check whether the annotation needs to be drawn.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_check_xy arg:self arg:renderer arguments arg arg Assign Call If BoolOp BoolOp Compare Compare Assign Call If Compare Assign Assign If Call Return return:yes If BoolOp BoolOp Compare Compare Assign Call If Compare Assign Assign If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensor_by_tf_output",
    "source_code": "def _get_tensor_by_tf_output(self, tf_output) -> tensor_lib.Tensor:\n    op = self._get_operation_by_tf_operation(tf_output.oper)\n    return op.outputs[tf_output.index]",
    "docstring": "Returns the representing . Note that there is only one such , i.e. multiple calls to this function with the same TF_Output value will always return the same object. Args: tf_output: A wrapped (the C API equivalent of ). Returns: The that represents .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_get_tensor_by_tf_output arg:self arg:tf_output arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "stde_median",
    "source_code": "def stde_median(data, axis=None):\n\n    def _stdemed_1D(data):\n        data = np.sort(data.compressed())\n        n = len(data)\n        z = 2.5758293035489004\n        k = int(np.round((n + 1) / 2.0 - z * np.sqrt(n / 4.0), 0))\n        return (data[n - k] - data[k - 1]) / (2.0 * z)\n    data = ma.array(data, copy=False, subok=True)\n    if axis is None:\n        return _stdemed_1D(data)\n    else:\n        if data.ndim > 2:\n            raise ValueError(f\"Array 'data' must be at most two dimensional, but got data.ndim = {data.ndim}\")\n        return ma.apply_along_axis(_stdemed_1D, axis, data)",
    "docstring": "Returns the McKean-Schrader estimate of the standard error of the sample median along the given axis. masked values are discarded. Parameters ---------- data : ndarray Data to trim. axis : {None,int}, optional Axis along which to perform the trimming. If None, the input array is first flattened.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:stde_median arg:data arg:axis arguments arg arg FunctionDef name:_stdemed_1D arg:data arguments arg Assign Call Call Assign Call Assign Assign Call Call Call Return return:yes Assign Call If Compare Return return:yes Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_enumerate_cores",
    "source_code": "def _enumerate_cores(bounds: List[int], ring_bounds: List[int], ring_sizes: List[int], host_bounds: List[int], host_sizes: List[int]) -> List[List[int]]:\n    if not bounds:\n        return [[]]\n    partials = _enumerate_cores(bounds[:-1], ring_bounds[:-1], ring_sizes[:-1], host_bounds[:-1], host_sizes[:-1])\n    results = []\n    for ring_i in range(0, bounds[-1], ring_bounds[-1]):\n        for ring_j in range(0, len(partials), ring_sizes[-1]):\n            for host_i in range(ring_i, ring_i + ring_bounds[-1], host_bounds[-1]):\n                for host_j in range(ring_j, ring_j + ring_sizes[-1], host_sizes[-1]):\n                    for i in range(host_i, host_i + host_bounds[-1]):\n                        for j in range(host_j, host_j + host_sizes[-1]):\n                            results.append(partials[j] + [i])\n    return results",
    "docstring": "Enumerates cores within from fatest to slowest varying axes. Args: bounds: Upper bounds of axes, from fastest to slowest varying. ring_bounds: Upper bounds of ring size per axis in the same axis order. ring_sizes: Number consecutive cores in the ring built so far, cumulatively. host_bounds: Number of axis values per host in the same axis order. host_sizes: Number consecutive cores on one host, cumulatively. Returns: Cores represented as a list of 4 integers in the same axis order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "FunctionDef name:_enumerate_cores arg:bounds arg:ring_bounds arg:ring_sizes arg:host_bounds arg:host_sizes arguments arg arg arg arg arg If Return return:no Assign Call Assign For Call For Call Call For Call For Call For Call For Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, html_template_path, export_report_path):\n    if not _file_io.file_exists(html_template_path):\n        raise IOError(\"File '{0}' does not exist.\".format(html_template_path))\n    with _file_io.FileIO(html_template_path, 'r') as f:\n        self.html_template = f.read()\n    _file_io.recursive_create_dir(os.path.dirname(export_report_path))\n    self.export_report_path = export_report_path",
    "docstring": "Reads the HTML template content. Args: html_template_path: A string, path to the template HTML file. export_report_path: A string, path to the generated HTML report. This path should point to a '.html' file with date and time in its name. e.g. 2019-01-01-10:05.toco_report.html. Raises: IOError: File doesn't exist.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\toco\\logging\\gen_html.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:html_template_path arg:export_report_path arguments arg arg arg If Call Raise Call Call With Call Assign Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "warning",
    "source_code": "def warning(request, message, extra_tags='', fail_silently=False):\n    add_message(request, constants.WARNING, message, extra_tags=extra_tags, fail_silently=fail_silently)",
    "docstring": "Add a message with the `` level.",
    "type": "function",
    "file_path": "django\\django\\contrib\\messages\\api.py",
    "ast_data": "FunctionDef name:warning arg:request arg:message arg:extra_tags arg:fail_silently arguments arg arg arg arg Call"
  },
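Standard usage goes through the messages framework inside a view (the view and message text are illustrative):

```python
from django.contrib import messages

def billing_view(request):  # hypothetical view
    messages.warning(request, "Your subscription expires soon.",
                     extra_tags="billing")
    ...
```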
  {
    "library": "matplotlib",
    "name": "edit_margin_min",
    "source_code": "def edit_margin_min(self, todo, size, cell=0):\n    if size > self.margin_vals[todo][cell]:\n        self.edit_margin(todo, size, cell)",
    "docstring": "Change the minimum size of the margin for one cell. Parameters ---------- todo : string (one of 'left', 'right', 'bottom', 'top') margin to alter. size : float Minimum size of the margin . If it is larger than the existing minimum it updates the margin size. Fraction of figure size. cell : int Cell column or row to edit.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:edit_margin_min arg:self arg:todo arg:size arg:cell arguments arg arg arg arg If Compare Call"
  },
  {
    "library": "scikit-learn",
    "name": "_route_params",
    "source_code": "def _route_params(self, params, parent, caller):\n    self._check_warnings(params=params)\n    unrequested = dict()\n    args = {arg: value for arg, value in params.items() if value is not None}\n    res = Bunch()\n    for prop, alias in self._requests.items():\n        if alias is False or alias == WARN:\n            continue\n        elif alias is True and prop in args:\n            res[prop] = args[prop]\n        elif alias is None and prop in args:\n            unrequested[prop] = args[prop]\n        elif alias in args:\n            res[prop] = args[alias]\n    if unrequested:\n        if self.method in COMPOSITE_METHODS:\n            callee_methods = COMPOSITE_METHODS[self.method]\n        else:\n            callee_methods = [self.method]\n        set_requests_on = ''.join([f'.set_{method}_request({{metadata}}=True/False)' for method in callee_methods])\n        message = f'[{', '.join([key for key in unrequested])}] are passed but are not explicitly set as requested or not requested for {self.owner}.{self.method}, which is used within {parent}.{caller}. Call `{self.owner}' + set_requests_on + '` for each metadata you want to request/ignore. See the Metadata Routing User guide <https://scikit-learn.org/stable/metadata_routing.html> for more information.'\n        raise UnsetMetadataPassedError(message=message, unrequested_params=unrequested, routed_params=res)\n    return res",
    "docstring": "Prepare the given parameters to be passed to the method. The output of this method can be used directly as the input to the corresponding method as extra props. Parameters ---------- params : dict A dictionary of provided metadata. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class: of {prop: value} which can be given to the corresponding method.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py",
    "ast_data": "FunctionDef name:_route_params arg:self arg:params arg:parent arg:caller arguments arg arg arg arg Call Assign Call Assign Call Compare Assign Call For Call If BoolOp Compare Compare If BoolOp Compare Compare Assign If BoolOp Compare Compare Assign If Compare Assign If If Compare Assign Assign Assign Call Assign Call Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "urlencode",
    "source_code": "@register.filter(is_safe=False)\n@stringfilter\ndef urlencode(value, safe=None):\n    kwargs = {}\n    if safe is not None:\n        kwargs['safe'] = safe\n    return quote(value, **kwargs)",
    "docstring": "Escape a value for use in a URL. The `` parameter determines the characters which should not be escaped by Python's quote() function. If not provided, use the default safe characters (but an empty string can be provided when *all* characters should be escaped).",
    "type": "function",
    "file_path": "django\\django\\template\\defaultfilters.py",
    "ast_data": "FunctionDef name:urlencode arg:value arg:safe arguments arg arg Assign If Compare Assign Return return:yes Call Call"
  },
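The filter can be exercised directly from Python as well as in templates (`{{ value|urlencode }}`); a sketch of the `safe` parameter's effect:

```python
from django.template.defaultfilters import urlencode

print(urlencode("/path with spaces/"))           # default safe chars keep '/'
print(urlencode("/path with spaces/", safe=""))  # escape everything, incl. '/'
```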
  {
    "library": "tensorflow",
    "name": "read_graphs_event",
    "source_code": "def read_graphs_event(self, offset):\n    return debug_event_pb2.DebugEvent.FromString(self._get_reader(self._graphs_path).read(offset)[0])",
    "docstring": "Read a DebugEvent proto at a given offset from the .graphs file. Args: offset: Offset to read the DebugEvent proto from. Returns: A DebugEventProto. Raises: if offset is at a wrong location. if offset is out of range of the file.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_graphs_event arg:self arg:offset arguments arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "enable_jit_compile_rewrite",
    "source_code": "def enable_jit_compile_rewrite():\n    global _JIT_COMPILE_REWRITE_ENABLED\n    _JIT_COMPILE_REWRITE_ENABLED = True\n    if context_safe() is not None:\n        context_safe().jit_compile_rewrite = True",
    "docstring": "Run jit_compile functions through rewrite pass. This runs jit_compile functions through all of the multidevice function rewrite passes.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:enable_jit_compile_rewrite arguments Assign If Compare Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_DataServiceDatasetV1",
    "source_code": "class _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):\n\n    @functools.wraps(_DataServiceDatasetV2.__init__)\n    def __init__(self, dataset_id, processing_mode, address, element_spec, protocol, data_transfer_protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, task_refresh_interval_hint_ms, cross_trainer_cache, target_workers):\n        self._wrapped = _DataServiceDatasetV2(dataset_id=dataset_id, processing_mode=processing_mode, address=address, element_spec=element_spec, protocol=protocol, data_transfer_protocol=data_transfer_protocol, job_name=job_name, consumer_index=consumer_index, num_consumers=num_consumers, max_outstanding_requests=max_outstanding_requests, task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, cross_trainer_cache=cross_trainer_cache, target_workers=target_workers)\n        super(_DataServiceDatasetV1, self).__init__(self._wrapped)",
    "docstring": "A that executes its input through the tf.data service.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py",
    "ast_data": "ClassDef name:_DataServiceDatasetV1 FunctionDef name:__init__ arg:self arg:dataset_id arg:processing_mode arg:address arg:element_spec arg:protocol arg:data_transfer_protocol arg:job_name arg:consumer_index arg:num_consumers arg:max_outstanding_requests arg:task_refresh_interval_hint_ms arg:cross_trainer_cache arg:target_workers arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "output_mask",
    "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef output_mask(self):\n    output = self.output\n    if isinstance(output, list):\n        return [getattr(x, '_keras_mask', None) for x in output]\n    else:\n        return getattr(output, '_keras_mask', None)",
    "docstring": "Retrieves the output mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer. Returns: Output mask tensor (potentially None) or list of output mask tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:output_mask arg:self arguments arg Assign If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "evaluate_max",
    "source_code": "def evaluate_max(self, left: Expr, right: Expr) -> Expr:\n    min_val = self.evaluate_min(left, right)\n    return right if min_val is left else left",
    "docstring": "return the larger of left and right, and guard on that choice",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:evaluate_max arg:self arg:left arg:right arguments arg arg arg Assign Call Return return:yes Compare"
  },
  {
    "library": "scikit-learn",
    "name": "ArgKmin",
    "source_code": "class ArgKmin(BaseDistancesReductionDispatcher):\n\n    @classmethod\n    def compute(cls, X, Y, k, metric='euclidean', chunk_size=None, metric_kwargs=None, strategy=None, return_distance=False):\n        if X.dtype == Y.dtype == np.float64:\n            return ArgKmin64.compute(X=X, Y=Y, k=k, metric=metric, chunk_size=chunk_size, metric_kwargs=metric_kwargs, strategy=strategy, return_distance=return_distance)\n        if X.dtype == Y.dtype == np.float32:\n            return ArgKmin32.compute(X=X, Y=Y, k=k, metric=metric, chunk_size=chunk_size, metric_kwargs=metric_kwargs, strategy=strategy, return_distance=return_distance)\n        raise ValueError(f'Only float64 or float32 datasets pairs are supported at this time, got: X.dtype={X.dtype} and Y.dtype={Y.dtype}.')",
    "docstring": "Compute the argkmin of row vectors of X on the ones of Y. For each row vector of X, computes the indices of k first the rows vectors of Y with the smallest distances. ArgKmin is typically used to perform bruteforce k-nearest neighbors queries. This class is not meant to be instantiated, one should only use its :meth: classmethod which handles allocation and deallocation consistently.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\metrics\\_pairwise_distances_reduction\\_dispatcher.py",
    "ast_data": "ClassDef name:ArgKmin FunctionDef name:compute arg:cls arg:X arg:Y arg:k arg:metric arg:chunk_size arg:metric_kwargs arg:strategy arg:return_distance arguments arg arg arg arg arg arg arg arg arg If Compare Return return:yes Call If Compare Return return:yes Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "DeferredLine",
    "source_code": "class DeferredLine(DeferredLineBase):\n\n    def __init__(self, name: str, line: str):\n        super().__init__(line)\n        self.name = name\n        assert not isinstance(line, DeferredLineBase)\n\n    def __call__(self) -> Optional[str]:\n        if not is_buffer_removed(self.name):\n            return self.line\n        return None\n\n    def _new_line(self, line: str) -> DeferredLine:\n        return DeferredLine(self.name, line)",
    "docstring": "A line that can be 'unwritten' by adding name to V.graph.removed_buffers",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "ClassDef name:DeferredLine FunctionDef name:__init__ arg:self arg:name arg:line arguments arg arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arguments arg If Call Return return:yes Return return:no FunctionDef name:_new_line arg:self arg:line arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_outputs_meta",
    "source_code": "def get_outputs_meta(self) -> tuple[torch.Tensor, ...]:\n    assert self._outputs_meta is not None, 'Attempted to get_outputs_meta() without configuring output meta'\n    return self._outputs_meta",
    "docstring": "Get the output metadata (meta tensors) reprensenting the outputs of this stage",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:get_outputs_meta arg:self arguments arg Compare Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "clear_doc",
    "source_code": "def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:\n    raise NotImplementedError",
    "docstring": "Remove specified data of a document. This method is called on the removal of the document. .. seealso:: :event:",
    "type": "method",
    "file_path": "sphinx\\sphinx\\environment\\collectors\\__init__.py",
    "ast_data": "FunctionDef name:clear_doc arg:self arg:app arg:env arg:docname arguments arg arg arg arg Raise"
  },
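A minimal sketch of a collector overriding `clear_doc`; the collector name and the env attribute it cleans are hypothetical:

```python
from sphinx.environment.collectors import EnvironmentCollector

class NoteCollector(EnvironmentCollector):  # hypothetical collector
    def clear_doc(self, app, env, docname):
        # Drop this document's cached entries so a rebuild starts clean.
        getattr(env, "note_data", {}).pop(docname, None)

    def merge_other(self, app, env, docnames, other):
        pass  # nothing to merge in this sketch

    def process_doc(self, app, doctree):
        pass  # nothing collected in this sketch
```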
  {
    "library": "matplotlib",
    "name": "_euler_step",
    "source_code": "def _euler_step(xyf_traj, dmap, f):\n    ny, nx = dmap.grid.shape\n    xi, yi = xyf_traj[-1]\n    cx, cy = f(xi, yi)\n    if cx == 0:\n        dsx = np.inf\n    elif cx < 0:\n        dsx = xi / -cx\n    else:\n        dsx = (nx - 1 - xi) / cx\n    if cy == 0:\n        dsy = np.inf\n    elif cy < 0:\n        dsy = yi / -cy\n    else:\n        dsy = (ny - 1 - yi) / cy\n    ds = min(dsx, dsy)\n    xyf_traj.append((xi + cx * ds, yi + cy * ds))\n    return (ds, xyf_traj)",
    "docstring": "Simple Euler integration step that extends streamline to boundary.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py",
    "ast_data": "FunctionDef name:_euler_step arg:xyf_traj arg:dmap arg:f arguments arg arg arg Assign Assign Assign Call If Compare Assign If Compare Assign Assign If Compare Assign If Compare Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    return f'Shard(dim={self.dim})'",
    "docstring": "machine readable representation of the Shard placement",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "push_current",
    "source_code": "def push_current(self, figure=None):\n    if not figure:\n        figure = self.figure\n    views = WeakKeyDictionary()\n    pos = WeakKeyDictionary()\n    for a in figure.get_axes():\n        views[a] = a._get_view()\n        pos[a] = self._axes_pos(a)\n    self.views[figure].push(views)\n    self.positions[figure].push(pos)",
    "docstring": "Push the current view limits and position onto their respective stacks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:push_current arg:self arg:figure arguments arg arg If Assign Assign Call Assign Call For Call Assign Call Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "max_memory_allocated",
    "source_code": "def max_memory_allocated(device: 'Device'=None) -> int:\n    return memory_stats(device=device).get('allocated_bytes.all.peak', 0)",
    "docstring": "Return the maximum GPU memory occupied by tensors in bytes for a given device. By default, this returns the peak allocated memory since the beginning of this program. :func: can be used to reset the starting point in tracking this metric. For example, these two functions can measure the peak allocated memory usage of each iteration in a training loop. Args: device (torch.device or int, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:max_memory_allocated arg:device arguments arg Return return:yes Call Call"
  },
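A sketch of the pattern described in the docstring, resetting the peak before a measurement (sizes are illustrative):

```python
import torch

if torch.cuda.is_available():
    torch.cuda.reset_peak_memory_stats()        # reset the tracking start point
    x = torch.randn(4096, 4096, device="cuda")  # ~64 MiB of float32
    peak = torch.cuda.max_memory_allocated()    # peak bytes since the reset
    print(f"peak allocated: {peak / 2**20:.1f} MiB")
```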
  {
    "library": "pytorch",
    "name": "_generate_torchscript_file",
    "source_code": "def _generate_torchscript_file(model_src: str, name: str) -> Optional[str]:\n    assert 'jit_model = ' in model_src, f'Missing jit_model definition:\\n{model_src}'\n    model_src = f'import torch\\n{model_src}'\n    model_root = os.path.join(get_temp_dir(), 'TorchScript_models')\n    os.makedirs(model_root, exist_ok=True)\n    module_path = os.path.join(model_root, f'torchscript_{name}.py')\n    artifact_path = os.path.join(model_root, f'torchscript_{name}.pt')\n    if os.path.exists(module_path):\n        raise ValueError(f'File {module_path} already exists.')\n    with open(module_path, 'w') as f:\n        f.write(model_src)\n    module_spec = importlib.util.spec_from_file_location(f'torchscript__{name}', module_path)\n    assert module_spec is not None\n    module = importlib.util.module_from_spec(module_spec)\n    loader = module_spec.loader\n    assert loader is not None\n    loader.exec_module(module)\n    jit_model = module.jit_model\n    assert isinstance(jit_model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)), f'Expected ScriptFunction or ScriptModule, got: {type(jit_model)}'\n    jit_model.save(artifact_path)\n    os.remove(module_path)\n    return artifact_path",
    "docstring": "Returns the path a saved model if one can be constructed from . Because TorchScript requires actual source code in order to script a model, we can't simply an appropriate model string. Instead, we must write the correct source to a temporary Python file and then import the TorchScript model from that temporary file. must contain , which will supply.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\instruction_counts\\core\\expand.py",
    "ast_data": "FunctionDef name:_generate_torchscript_file arg:model_src arg:name arguments arg arg Compare Assign Assign Call Call Call Assign Call Assign Call If Call Raise Call With Call Call Assign Call Compare Assign Call Assign Compare Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "linebreaks",
    "source_code": "@keep_lazy_text\ndef linebreaks(value, autoescape=False):\n    value = normalize_newlines(value)\n    paras = re.split('\\n{2,}', str(value))\n    if autoescape:\n        paras = ['<p>%s</p>' % escape(p).replace('\\n', '<br>') for p in paras]\n    else:\n        paras = ['<p>%s</p>' % p.replace('\\n', '<br>') for p in paras]\n    return '\\n\\n'.join(paras)",
    "docstring": "Convert newlines into and s.",
    "type": "function",
    "file_path": "django\\django\\utils\\html.py",
    "ast_data": "FunctionDef name:linebreaks arg:value arg:autoescape arguments arg arg Assign Call Assign Call Call If Assign Call Call Assign Call Return return:yes Call"
  },
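A quick usage sketch of `linebreaks`: double newlines delimit paragraphs, single newlines become `<br>` (expected output shown as comments):

```python
from django.utils.html import linebreaks

html = linebreaks("first line\nsecond line\n\nnew paragraph")
print(html)
# <p>first line<br>second line</p>
#
# <p>new paragraph</p>
```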
  {
    "library": "tensorflow",
    "name": "ResidualWrapper",
    "source_code": "@tf_export(v1=['nn.rnn_cell.ResidualWrapper'])\nclass ResidualWrapper(rnn_cell_wrapper_impl.ResidualWrapperBase, _RNNCellWrapperV1):\n\n    def __init__(self, *args, **kwargs):\n        super(ResidualWrapper, self).__init__(*args, **kwargs)\n    __init__.__doc__ = rnn_cell_wrapper_impl.ResidualWrapperBase.__init__.__doc__",
    "docstring": "RNNCell wrapper that ensures cell inputs are added to the outputs.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py",
    "ast_data": "ClassDef name:ResidualWrapper FunctionDef name:__init__ arg:self arguments arg arg arg Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "_values",
    "source_code": "@property\ndef _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray:\n    mgr = self._mgr\n    blocks = mgr.blocks\n    if len(blocks) != 1:\n        return ensure_wrapped_if_datetimelike(self.values)\n    arr = blocks[0].values\n    if arr.ndim == 1:\n        return self.values\n    arr = cast('np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray', arr)\n    return arr.T",
    "docstring": "Analogue to ._values that may return a 2D ExtensionArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Assign Assign If Compare Call Return return:yes Call Assign If Compare Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "mm_grid",
    "source_code": "@SymbolicGridFn\ndef mm_grid(m, n, meta, *, cdiv):\n    return (cdiv(m, meta['BLOCK_M']) * cdiv(n, meta['BLOCK_N']), 1, 1)",
    "docstring": "The CUDA grid size for matmul triton templates.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py",
    "ast_data": "FunctionDef name:mm_grid arg:m arg:n arg:meta arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_loglevel",
    "source_code": "def set_loglevel(level):\n    _log.setLevel(level.upper())\n    _ensure_handler().setLevel(level.upper())",
    "docstring": "Configure Matplotlib's logging levels. Matplotlib uses the standard library framework under the root logger 'matplotlib'. This is a helper function to: - set Matplotlib's root logger level - set the root logger handler's level, creating the handler if it does not exist yet Typically, one should call `` rather than use this function. Parameters ---------- level : {\"notset\", \"debug\", \"info\", \"warning\", \"error\", \"critical\"} The log level of the handler. Notes ----- The first time this function is called, an additional handler is attached to Matplotlib's root handler; this handler is reused every time and this function simply manipulates the logger and handler's level.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:set_loglevel arg:level arguments arg Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_data_pack_sequence_as",
    "source_code": "def _tf_data_pack_sequence_as(structure, flat_sequence):\n    if not (_tf_data_is_nested(flat_sequence) or isinstance(flat_sequence, list)):\n        raise TypeError(f\"Argument `flat_sequence` must be a sequence. Got '{type(flat_sequence).__name__}'.\")\n    if not _tf_data_is_nested(structure):\n        if len(flat_sequence) != 1:\n            raise ValueError(f'Argument `structure` is a scalar but `len(flat_sequence)`={len(flat_sequence)} > 1')\n        return flat_sequence[0]\n    flat_structure = _tf_data_flatten(structure)\n    if len(flat_structure) != len(flat_sequence):\n        raise ValueError(f'Could not pack sequence. Argument `structure` had {len(flat_structure)} elements, but argument `flat_sequence` had {len(flat_sequence)} elements. Received structure: {structure}, flat_sequence: {flat_sequence}.')\n    _, packed = _tf_data_packed_nest_with_indices(structure, flat_sequence, 0)\n    return sequence_like(structure, packed)",
    "docstring": "Returns a given flattened sequence packed into a nest. If is a scalar, must be a single-element list; in this case the return value is . Args: structure: tuple or list constructed of scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are considered scalars. flat_sequence: flat sequence to pack. Returns: packed: converted to have the same recursive structure as . Raises: ValueError: If nest and structure have different element counts.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_data_pack_sequence_as arg:structure arg:flat_sequence arguments arg arg If BoolOp Call Call Raise Call Call If Call If Compare Call Raise Call Call Return return:yes Assign Call If Compare Call Call Raise Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "LazyBatchNorm2d",
    "source_code": "class LazyBatchNorm2d(_LazyNormBase, _BatchNorm):\n    cls_to_become = BatchNorm2d\n\n    def _check_input_dim(self, input):\n        if input.dim() != 4:\n            raise ValueError(f'expected 4D input (got {input.dim()}D input)')",
    "docstring": "A :class: module with lazy initialization. Lazy initialization is done for the `BatchNorm2dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinrunning_meanrunning_var`",
    "type": "class",
    "file_path": "pytorch\\torch\\nn\\modules\\batchnorm.py",
    "ast_data": "ClassDef name:LazyBatchNorm2d Assign FunctionDef name:_check_input_dim arg:self arg:input arguments arg arg If Compare Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "view_to_reshape",
    "source_code": "def view_to_reshape(gm):\n    subgraph_names: OrderedSet[str] = OrderedSet((x.target for x in gm.graph.find_nodes(op='get_attr')))\n    for child_name, child_mod in gm.named_children():\n        if child_name in subgraph_names and isinstance(child_mod, torch.fx.GraphModule):\n            view_to_reshape(child_mod)\n    for nd in gm.graph.find_nodes(op='call_function', target=torch.ops.aten.view.default):\n        nd.target = torch.ops.aten.reshape.default",
    "docstring": "Replace view ops in the GraphModule to reshape ops.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:view_to_reshape arg:gm arguments arg Call Call For Call If BoolOp Compare Call Call For Call Assign"
  },
  {
    "library": "pytorch",
    "name": "tensor_storage_size",
    "source_code": "def tensor_storage_size(self) -> Optional[int]:\n    if self.tensor_data is None:\n        return None\n    numels = reduce(operator.mul, self.tensor_data.size, 1)\n    dtype_size = torch._utils._element_size(self.tensor_data.properties.dtype)\n    return numels * dtype_size",
    "docstring": "Calculates the storage size of the underlying tensor, or None if this is not a tensor write. Returns: Optional[int] storage size, in bytes of underlying tensor if any.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\planner.py",
    "ast_data": "FunctionDef name:tensor_storage_size arg:self arguments arg If Compare Return return:no Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_build_map_helper",
    "source_code": "def _build_map_helper(tensor, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, layer_indices):\n    layer, node_index, _ = tensor._keras_history\n    node = layer._inbound_nodes[node_index]\n    if node in finished_nodes:\n        return\n    if node in nodes_in_progress:\n        raise ValueError('The tensor ' + str(tensor) + ' at layer \"' + layer.name + '\" is part of a cycle.')\n    if layer not in layer_indices:\n        layer_indices[layer] = len(layer_indices)\n    nodes_in_progress.add(node)\n    if not node.is_input:\n        for tensor in node.keras_inputs:\n            _build_map_helper(tensor, finished_nodes, nodes_in_progress, nodes_in_decreasing_depth, layer_indices)\n    finished_nodes.add(node)\n    nodes_in_progress.remove(node)\n    nodes_in_decreasing_depth.append(node)",
    "docstring": "Recursive helper for .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py",
    "ast_data": "FunctionDef name:_build_map_helper arg:tensor arg:finished_nodes arg:nodes_in_progress arg:nodes_in_decreasing_depth arg:layer_indices arguments arg arg arg arg arg Assign Assign If Compare Return return:no If Compare Raise Call Call If Compare Assign Call Call If For Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "HalfLogitLink",
    "source_code": "class HalfLogitLink(BaseLink):\n    interval_y_pred = Interval(0, 1, False, False)\n\n    def link(self, y_pred, out=None):\n        out = logit(y_pred, out=out)\n        out *= 0.5\n        return out\n\n    def inverse(self, raw_prediction, out=None):\n        return expit(2 * raw_prediction, out)",
    "docstring": "Half the logit link function g(x)=1/2 * logit(x). Used for the exponential loss.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\_loss\\link.py",
    "ast_data": "ClassDef name:HalfLogitLink Assign Call FunctionDef name:link arg:self arg:y_pred arg:out arguments arg arg arg Assign Call Return return:yes FunctionDef name:inverse arg:self arg:raw_prediction arg:out arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_operator_range",
    "source_code": "def get_operator_range(chars_range):\n    if chars_range == 'None' or chars_range is None:\n        return None\n    if all((item not in chars_range for item in [',', '-'])):\n        raise ValueError('The correct format for operator_range is <start>-<end>, or <point>, <start>-<end>')\n    ops_start_chars_set = set()\n    ranges = chars_range.split(',')\n    for item in ranges:\n        if len(item) == 1:\n            ops_start_chars_set.add(item.lower())\n            continue\n        start, end = item.split('-')\n        ops_start_chars_set.update((chr(c).lower() for c in range(ord(start), ord(end) + 1)))\n    return ops_start_chars_set",
    "docstring": "Generates the characters from chars_range inclusive.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:get_operator_range arg:chars_range arguments arg If BoolOp Compare Compare Return return:no If Call Compare Raise Call Assign Call Assign Call For If Compare Call Call Call Assign Call Call Call Call Call Call Call Return return:yes"
  },
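A standalone sketch of the same range parsing for illustration; `parse_operator_range` is a hypothetical re-implementation, not the benchmark harness function itself, and it omits the format validation shown above:

```python
def parse_operator_range(chars_range):
    # "a-c,f" -> {'a', 'b', 'c', 'f'}; None / "None" disables filtering.
    if chars_range in (None, "None"):
        return None
    chars = set()
    for item in chars_range.split(","):
        if len(item) == 1:
            chars.add(item.lower())
        else:
            start, end = item.split("-")
            chars.update(chr(c).lower() for c in range(ord(start), ord(end) + 1))
    return chars

print(sorted(parse_operator_range("a-c,f")))  # ['a', 'b', 'c', 'f']
```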
  {
    "library": "pandas",
    "name": "correct_title_capitalization",
    "source_code": "def correct_title_capitalization(title: str) -> str:\n    if title[0] == ':':\n        return title\n    correct_title: str = re.sub('^\\\\W*', '', title).capitalize()\n    removed_https_title = re.sub('<https?:\\\\/\\\\/.*[\\\\r\\\\n]*>', '', correct_title)\n    word_list = re.split('\\\\W', removed_https_title)\n    for word in word_list:\n        if word.lower() in CAP_EXCEPTIONS_DICT:\n            correct_title = re.sub(f'\\\\b{word}\\\\b', CAP_EXCEPTIONS_DICT[word.lower()], correct_title)\n    return correct_title",
    "docstring": "Algorithm to create the correct capitalization for a given title. Parameters ---------- title : str Heading string to correct. Returns ------- str Correctly capitalized heading.",
    "type": "function",
    "file_path": "pandas\\scripts\\validate_rst_title_capitalization.py",
    "ast_data": "FunctionDef name:correct_title_capitalization arg:title arguments arg If Compare Return return:yes Call Call Assign Call Assign Call For If Compare Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "pipeline",
    "source_code": "def pipeline(module: torch.nn.Module, mb_args: tuple[Any, ...], mb_kwargs: Optional[dict[str, Any]]=None, split_spec: Optional[dict[str, SplitPoint]]=None, split_policy: Optional[Callable[[fx.GraphModule], fx.GraphModule]]=None) -> Pipe:\n    if split_spec is not None and split_policy is not None:\n        raise ValueError('Cannot specify both `split_spec` and `split_policy`. Please use only one of them.')\n    if split_spec is not None:\n        annotate_split_points(module, split_spec)\n        return Pipe.from_tracing(mod=module, example_args=mb_args, example_kwargs=mb_kwargs)\n    else:\n        return Pipe.from_tracing(mod=module, example_args=mb_args, example_kwargs=mb_kwargs, split_policy=split_policy)",
    "docstring": "Split a module based on a specification. See for more details. Arguments --------- module: The module to be splitted. mb_args: Example positional inputs, in micro-batch form. mb_kwargs: Example keyword inputs, in micro-batch form. (default: ) split_spec: A dictionary using submodule names as split marker. (default: ) split_policy: The policy to use for splitting the module. (default: ) Returns ------- A pipeline representation of class .",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py",
    "ast_data": "FunctionDef name:pipeline arg:module arg:mb_args arg:mb_kwargs arg:split_spec arg:split_policy arguments arg arg arg arg arg If BoolOp Compare Compare Raise Call If Compare Call Return return:yes Call Return return:yes Call"
  },
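A hedged sketch of calling `pipeline` with a `split_spec`; the model and split-point name are hypothetical, and an actual pipelined run additionally needs an initialized distributed environment:

```python
import torch
from torch.distributed.pipelining import SplitPoint, pipeline

# Toy model; Sequential children are named "0", "1", "2", "3".
model = torch.nn.Sequential(*[torch.nn.Linear(16, 16) for _ in range(4)])
x = torch.randn(8, 16)  # one example micro-batch

# Trace and split into two stages, cutting before submodule "2".
pipe = pipeline(model, mb_args=(x,), split_spec={"2": SplitPoint.BEGINNING})
```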
  {
    "library": "pandas",
    "name": "_mask_selected_obj",
    "source_code": "@final\ndef _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT:\n    ids = self._grouper.ids\n    mask = mask & (ids != -1)\n    return self._selected_obj[mask]",
    "docstring": "Return _selected_obj with mask applied. Parameters ---------- mask : np.ndarray[bool] Boolean mask to apply. Returns ------- Series or DataFrame Filtered _selected_obj.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\groupby\\groupby.py",
    "ast_data": "FunctionDef name:_mask_selected_obj arg:self arg:mask arguments arg arg Assign Assign Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_reuse_ir",
    "source_code": "def set_reuse_ir(val: bool) -> None:\n    torch._C._lazy._set_reuse_ir(val)",
    "docstring": "Set the config to reuse IR nodes for faster tracing",
    "type": "function",
    "file_path": "pytorch\\torch\\_lazy\\config.py",
    "ast_data": "FunctionDef name:set_reuse_ir arg:val arguments arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "inverse_transform",
    "source_code": "def inverse_transform(self, X):\n    check_is_fitted(self)\n    xp, _ = get_namespace(X)\n    X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, dtype=_array_api.supported_float_dtypes(xp), force_writeable=True, ensure_all_finite='allow-nan')\n    if sparse.issparse(X):\n        inplace_column_scale(X, self.scale_)\n    else:\n        X *= self.scale_\n    return X",
    "docstring": "Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data that should be transformed back. Returns ------- X_original : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py",
    "ast_data": "FunctionDef name:inverse_transform arg:self arg:X arguments arg arg Call Assign Call Assign Call Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "python_version",
    "source_code": "def python_version(self, *, python: Path | str | None=None) -> str:\n    return self.python('-c', \"import sys; print('{0.major}.{0.minor}.{0.micro}{1}'.format(sys.version_info, getattr(sys, 'abiflags', '')))\", python=python, capture_output=True).stdout.strip()",
    "docstring": "Get the Python version for the virtual environment.",
    "type": "method",
    "file_path": "pytorch\\tools\\nightly.py",
    "ast_data": "FunctionDef name:python_version arg:self arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    if self.thread is None:\n        self.mtimes = {}\n    Monitor.start(self)",
    "docstring": "Start our own background task thread for self.run.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\plugins.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Compare Assign Call"
  },
  {
    "library": "authlib",
    "name": "register_signature_method",
    "source_code": "@classmethod\ndef register_signature_method(cls, name, sign):\n    cls.SIGNATURE_METHODS[name] = sign",
    "docstring": "Extend client signature methods. :param name: A string to represent signature method. :param sign: A function to generate signature. The `` method accept 2 parameters:: def custom_sign_method(client, request): # client is the instance of Client. return \"your-signed-string\" Client.register_signature_method(\"custom-name\", custom_sign_method)",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\client_auth.py",
    "ast_data": "FunctionDef name:register_signature_method arg:cls arg:name arg:sign arguments arg arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "_offsets_to_lengths",
    "source_code": "def _offsets_to_lengths(offsets, max_len):\n    from torch._subclasses.fake_tensor import FakeTensor\n    from torch._subclasses.functional_tensor import FunctionalTensor\n    if not isinstance(offsets, (FakeTensor, FunctionalTensor)) and offsets.device.type != 'meta':\n        return offsets.diff().tolist()\n    return [max_len] * (offsets.size(0) - 1)",
    "docstring": "If the offsets tensor is fake, then we don't know the actual lengths. In that case, we can just assume the worst case; each batch has max length.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:_offsets_to_lengths arg:offsets arg:max_len arguments arg arg If BoolOp Call Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "idct",
    "source_code": "def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):\n    type = _inverse_typemap[type]\n    return _pocketfft.dct(x, type, n, axis, norm, overwrite_x)",
    "docstring": "Return the Inverse Discrete Cosine Transform of an arbitrary type sequence. Parameters ---------- x : array_like The input array. type : {1, 2, 3, 4}, optional Type of the DCT (see Notes). Default type is 2. n : int, optional Length of the transform. If `xxxdct`. Examples -------- The Type 1 DCT is equivalent to the DFT for real, even-symmetrical inputs. The output is also real and even-symmetrical. Half of the IFFT input is used to generate half of the IFFT output: >>> from scipy.fftpack import ifft, idct >>> import numpy as np >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real array([ 4., 3., 5., 10., 5., 3.]) >>> idct(np.array([ 30., -8., 6., -2.]), 1) / 6 array([ 4., 3., 5., 10.])",
    "type": "function",
    "file_path": "scipy\\scipy\\fftpack\\_realtransforms.py",
    "ast_data": "FunctionDef name:idct arg:x arg:type arg:n arg:axis arg:norm arg:overwrite_x arguments arg arg arg arg arg arg Assign Return return:yes Call"
  },
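The `scipy.fftpack` transforms are unnormalized, so a type-2 `dct` followed by `idct` recovers the input scaled by `2*N`. A small round-trip check:

```python
import numpy as np
from scipy.fftpack import dct, idct

x = np.array([4.0, 3.0, 5.0, 10.0])
y = dct(x, type=2)
# idct(dct(x)) == 2 * len(x) * x for the default (unnormalized) transforms
print(idct(y, type=2) / (2 * len(x)))  # [ 4.  3.  5. 10.]
```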
  {
    "library": "django",
    "name": "Origin",
    "source_code": "class Origin:\n\n    def __init__(self, name, template_name):\n        self.name = name\n        self.template_name = template_name",
    "docstring": "A container to hold debug information as described in the template API documentation.",
    "type": "class",
    "file_path": "django\\django\\template\\backends\\jinja2.py",
    "ast_data": "ClassDef name:Origin FunctionDef name:__init__ arg:self arg:name arg:template_name arguments arg arg arg Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self, path: str, token: Optional[str]=None) -> None:\n    from huggingface_hub import HfFileSystem\n    if HfFileSystem.protocol not in fsspec.available_protocols():\n        fsspec.register_implementation(HfFileSystem.protocol, HfFileSystem)\n    if token is not None:\n        super().__init__(path=path, token=token)\n    else:\n        super().__init__(path=path)\n    self.storage_data: dict[str, str] = {}",
    "docstring": "Initialize the huggingface reader pointing to path. Args: path: hf directory where the checkpoint will be read from. Should begin with hf://. token: The token to use to authenticate with huggingface hub.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_hf_storage.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:path arg:token arguments arg arg arg If Compare Call Call If Compare Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "ignore_headers",
    "source_code": "def ignore_headers(headers=('Range',), debug=False):\n    request = cherrypy.serving.request\n    for name in headers:\n        if name in request.headers:\n            if debug:\n                cherrypy.log('Ignoring request header %r' % name, 'TOOLS.IGNORE_HEADERS')\n            del request.headers[name]",
    "docstring": "Delete request headers whose field names are included in 'headers'. This is a useful tool for working behind certain HTTP servers; for example, Apache duplicates the work that CP does for 'Range' headers, and will doubly-truncate the response.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:ignore_headers arg:headers arg:debug arguments arg arg Assign For If Compare If Call"
  },
  {
    "library": "numpy",
    "name": "_recursive_filled",
    "source_code": "def _recursive_filled(a, mask, fill_value):\n    names = a.dtype.names\n    for name in names:\n        current = a[name]\n        if current.dtype.names is not None:\n            _recursive_filled(current, mask[name], fill_value[name])\n        else:\n            np.copyto(current, fill_value[name], where=mask[name])",
    "docstring": "Recursively fill with .",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:_recursive_filled arg:a arg:mask arg:fill_value arguments arg arg arg Assign For Assign If Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_xaxis_text1_transform",
    "source_code": "def get_xaxis_text1_transform(self, pad_points):\n    labels_align = mpl.rcParams['xtick.alignment']\n    return (self.get_xaxis_transform(which='tick1') + mtransforms.ScaledTranslation(0, -1 * pad_points / 72, self.get_figure(root=False).dpi_scale_trans), 'top', labels_align)",
    "docstring": "Returns ------- transform : Transform The transform used for drawing x-axis labels, which will add *pad_points* of padding (in points) between the axis and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates valign : {'center', 'top', 'bottom', 'baseline', 'center_baseline'} The text vertical alignment. halign : {'center', 'left', 'right'} The text horizontal alignment. Notes ----- This transformation is primarily used by the class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:get_xaxis_text1_transform arg:self arg:pad_points arguments arg arg Assign Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "stop",
    "source_code": "def stop(self):\n    self._task_state_poller_thread.stop()\n    self._should_preemption_thread_run = False\n    with self._cluster_update_lock:\n        self._cluster_due_for_update_or_finish.set()",
    "docstring": "Ensure the worker preemption thread is closed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call Assign With Call"
  },
  {
    "library": "tensorflow",
    "name": "_tf_data_yield_value",
    "source_code": "def _tf_data_yield_value(iterable):\n    if isinstance(iterable, _collections_abc.Mapping):\n        for key in _tf_data_sorted(iterable):\n            yield iterable[key]\n    elif iterable.__class__.__name__ == 'SparseTensorValue':\n        yield iterable\n    elif _is_attrs(iterable):\n        for _, attr in _get_attrs_items(iterable):\n            yield attr\n    elif isinstance(iterable, CustomNestProtocol):\n        flat_component = iterable.__tf_flatten__()[1]\n        assert isinstance(flat_component, tuple)\n        yield from flat_component\n    else:\n        for value in iterable:\n            yield value",
    "docstring": "Yield elements of in a deterministic order. Args: iterable: an iterable. Yields: The iterable elements in a deterministic order.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:_tf_data_yield_value arg:iterable arguments arg If Call For Call If Compare If Call For Call If Call Assign Call Call For"
  },
  {
    "library": "tensorflow",
    "name": "GlorotNormal",
    "source_code": "@tf_export(v1=['glorot_normal_initializer', 'initializers.glorot_normal'])\n@deprecation.deprecated_endpoints('glorot_normal_initializer', 'initializers.glorot_normal')\nclass GlorotNormal(VarianceScaling):\n\n    @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')\n    def __init__(self, seed=None, dtype=dtypes.float32):\n        super(GlorotNormal, self).__init__(scale=1.0, mode='fan_avg', distribution='truncated_normal', seed=seed)\n\n    def get_config(self):\n        return {'seed': self.seed, 'dtype': self.dtype.name}",
    "docstring": "The Glorot normal initializer, also called Xavier normal initializer. It draws samples from a truncated normal distribution centered on 0 with standard deviation (after truncation) given by where is the number of input units in the weight tensor and is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See for behavior. dtype: Default data type, used if no argument is provided when calling the initializer. Only floating point types are supported. References: [Glorot et al., 2010]( ([pdf](",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops.py",
    "ast_data": "ClassDef name:GlorotNormal FunctionDef name:__init__ arg:self arg:seed arg:dtype arguments arg arg arg Call Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, x, *, axis=-1):\n    x = np.asarray(x)\n    if x.shape[axis] != self.n:\n        raise ValueError(f'CZT defined for length {self.n}, not {x.shape[axis]}')\n    trnsp = np.arange(x.ndim)\n    trnsp[[axis, -1]] = [-1, axis]\n    x = x.transpose(*trnsp)\n    y = ifft(self._Fwk2 * fft(x * self._Awk2, self._nfft))\n    y = y[..., self._yidx] * self._wk2\n    return y.transpose(*trnsp)",
    "docstring": "Calculate the chirp z-transform of a signal. Parameters ---------- x : array The signal to transform. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. Returns ------- out : ndarray An array of the same dimensions as , but with the length of the transformed axis set to .",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_czt.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arguments arg arg arg Assign Call If Compare Raise Call Assign Call Assign Assign Call Assign Call Call Assign Return return:yes Call"
  },
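With default parameters the chirp z-transform samples the unit circle, so it matches the DFT; a quick consistency check against `scipy.fft.fft`:

```python
import numpy as np
from scipy.fft import fft
from scipy.signal import CZT

x = np.random.randn(16)
czt_16 = CZT(n=16)  # defaults: m = n, full unit circle
print(np.allclose(czt_16(x), fft(x)))  # True
```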
  {
    "library": "tensorflow",
    "name": "restore_ops",
    "source_code": "def restore_ops(self, reader=None):\n    if self._has_registered_saver():\n        raise ValueError('Unable to run individual checkpoint restore for objects with registered savers.')\n    restore_ops, tensor_saveables, python_positions, _ = self.gather_ops_or_named_saveables()\n    restore_ops.extend(self._checkpoint.restore_saveables(tensor_saveables, python_positions, reader=reader))\n    return restore_ops",
    "docstring": "Create or fetch restore ops for this object's attributes. Requires that the Python object has been bound to an object ID in the checkpoint. Args: reader: A . If None, a new instance will be created. Returns: A list of operations when graph building, or an empty list when executing eagerly.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py",
    "ast_data": "FunctionDef name:restore_ops arg:self arg:reader arguments arg arg If Call Raise Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "outputs",
    "source_code": "@property\ndef outputs(self) -> List[tensor_lib.Tensor]:\n    return self._outputs",
    "docstring": "Outputs of all the Exit nodes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:outputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "admin_actions",
    "source_code": "def admin_actions(context):\n    context['action_index'] = context.get('action_index', -1) + 1\n    return context",
    "docstring": "Track the number of times the action field has been rendered on the page, so we know which value to use.",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\templatetags\\admin_list.py",
    "ast_data": "FunctionDef name:admin_actions arg:context arguments arg Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "masked_invalid",
    "source_code": "def masked_invalid(a, copy=True):\n    a = np.array(a, copy=None, subok=True)\n    res = masked_where(~np.isfinite(a), a, copy=copy)\n    if res._mask is nomask:\n        res._mask = make_mask_none(res.shape, res.dtype)\n    return res",
    "docstring": "Mask an array where invalid values occur (NaNs or infs). This function is a shortcut to `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. Only applies to arrays with a dtype where NaNs or infs make sense (i.e. floating point types), but accepts any array_like object. See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5, dtype=float) >>> a[2] = np.nan >>> a[3] = np.inf >>> a array([ 0., 1., nan, inf, 4.]) >>> ma.masked_invalid(a) masked_array(data=[0.0, 1.0, --, --, 4.0], mask=[False, False, True, True, False], fill_value=1e+20)",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:masked_invalid arg:a arg:copy arguments arg arg Assign Call Assign Call Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    X = validate_data(self, X, accept_sparse='csr', reset=False, ensure_non_negative=True)\n    sparse = sp.issparse(X)\n    if self.sample_interval is None:\n        if self.sample_steps == 1:\n            sample_interval = 0.8\n        elif self.sample_steps == 2:\n            sample_interval = 0.5\n        elif self.sample_steps == 3:\n            sample_interval = 0.4\n        else:\n            raise ValueError('If sample_steps is not in [1, 2, 3], you need to provide sample_interval')\n    else:\n        sample_interval = self.sample_interval\n    transf = self._transform_sparse if sparse else self._transform_dense\n    return transf(X, self.sample_steps, sample_interval)",
    "docstring": "Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Returns ------- X_new : {ndarray, sparse matrix}, shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\kernel_approximation.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Assign Call Assign Call If Compare If Compare Assign If Compare Assign If Compare Assign Raise Call Assign Assign Return return:yes Call"
  },
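The output width follows `n_features * (2 * sample_steps - 1)` as stated in the docstring; a minimal check with random non-negative data:

```python
import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.random.rand(5, 3)  # the chi2 map requires non-negative input
X_new = AdditiveChi2Sampler(sample_steps=2).fit_transform(X)
print(X_new.shape)  # (5, 9) == (5, 3 * (2*2 - 1))
```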
  {
    "library": "pytorch",
    "name": "from_choice_args",
    "source_code": "@classmethod\ndef from_choice_args(cls, example_inputs: list[torch.Tensor], example_inputs_extern: list[torch.Tensor], out: torch.Tensor, out_extern: torch.Tensor, expected: Optional[torch.Tensor]=None) -> Self:\n    return cls(triton=BenchmarkTensors(example_inputs, out), extern=BenchmarkTensors(example_inputs_extern, out_extern), expected=expected)",
    "docstring": "Factory method to create AutotuneInputs from separate inputs/outputs",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py",
    "ast_data": "FunctionDef name:from_choice_args arg:cls arg:example_inputs arg:example_inputs_extern arg:out arg:out_extern arg:expected arguments arg arg arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_replica",
    "source_code": "def _get_replica(self, replica_id):\n    value = self._values[replica_id]\n    if self._use_packed_variable():\n        return self._packed_var.on_device(value.device)\n    else:\n        return value",
    "docstring": "Returns the value on a device with the given replica_id.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_get_replica arg:self arg:replica_id arguments arg arg Assign If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_new_IntervalIndex",
    "source_code": "def _new_IntervalIndex(cls, d):\n    return cls.from_arrays(**d)",
    "docstring": "This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_new_IntervalIndex arg:cls arg:d arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "file_crc32",
    "source_code": "def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):\n    crc = 0\n    with FileIO(filename, mode='rb') as f:\n        chunk = f.read(n=block_size)\n        while chunk:\n            crc = binascii.crc32(chunk, crc)\n            chunk = f.read(n=block_size)\n    return hex(crc & 4294967295)",
    "docstring": "Get the crc32 of the passed file. The crc32 of a file can be used for error checking; two files with the same crc32 are considered equivalent. Note that the entire file must be read to produce the crc32. Args: filename: string, path to a file block_size: Integer, process the files by reading blocks of bytes. Use -1 to read the file as once. Returns: hexadecimal as string, the crc32 of the passed file.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:file_crc32 arg:filename arg:block_size arguments arg arg Assign With Call Assign Call While Assign Call Assign Call Return return:yes Call"
  },
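A standalone sketch of the same chunked CRC-32 pattern using plain Python file I/O; `crc32_of_file` is a hypothetical helper (the TensorFlow version reads through `FileIO`):

```python
import binascii

def crc32_of_file(filename, block_size=64 * 1024):
    crc = 0
    with open(filename, "rb") as f:
        while chunk := f.read(block_size):
            crc = binascii.crc32(chunk, crc)  # fold each block into the CRC
    return hex(crc & 0xFFFFFFFF)
```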
  {
    "library": "seaborn",
    "name": "label",
    "source_code": "def label(self, formatter: Formatter | None=None, *, like: str | Callable | None=None, base: int | None | Default=default, unit: str | None=None) -> Continuous:\n    if formatter is not None and (not isinstance(formatter, Formatter)):\n        raise TypeError(f'Label formatter must be an instance of {Formatter!r}, not {type(formatter)!r}')\n    if like is not None and (not (isinstance(like, str) or callable(like))):\n        msg = f'`like` must be a string or callable, not {type(like).__name__}.'\n        raise TypeError(msg)\n    new = copy(self)\n    new._label_params = {'formatter': formatter, 'like': like, 'base': base, 'unit': unit}\n    return new",
    "docstring": "Configure the appearance of tick labels for the scale's axis or legend. Parameters ---------- formatter : :class: subclass Pre-configured formatter to use; other parameters will be ignored. like : str or callable Either a format pattern (e.g., ), a format string with fields named and/or (e.g., ), or a callable with a signature like . In the latter variants, is passed as the tick value and is passed as the tick index. base : number Use log formatter (with scientific notation) having this value as the base. Set to to override the default formatter with a log transform. unit : str or (str, str) tuple Use SI prefixes with these units (e.g., with , a tick value of 5000 will appear as ). When a tuple, the first element gives the separator between the number and unit. Returns ------- scale Copy of self with new label configuration.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:label arg:self arg:formatter arguments arg arg arg arg arg If BoolOp Compare Call Raise Call Call If BoolOp Compare BoolOp Call Call Assign Call Raise Call Assign Call Assign Return return:yes"
  },
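A hedged sketch of the seaborn objects API described above; the data is hypothetical:

```python
import seaborn.objects as so

p = (
    so.Plot(x=[1000, 2000, 3000], y=[1, 2, 3])
    .add(so.Dot())
    # Format x tick labels with the tick value substituted for {x}
    .scale(x=so.Continuous().label(like="{x:.0f}"))
)
```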
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, num_steps=None, last_step=None, steps_per_run=1):\n    if num_steps is None and last_step is None:\n        raise ValueError('One of num_steps or last_step must be specified.')\n    if num_steps is not None and last_step is not None:\n        raise ValueError('Only one of num_steps or last_step can be specified.')\n    if steps_per_run is None or steps_per_run < 1:\n        raise ValueError('steps_per_run should be greater than 0')\n    self._num_steps = num_steps\n    self._last_step = last_step\n    self._steps_per_run_initial_value = steps_per_run",
    "docstring": "Initializes a . This hook requests stop after either a number of steps have been executed or a last step has been reached. Only one of the two options can be specified. if is specified, it indicates the number of steps to execute after is called. If instead is specified, it indicates the last step we want to execute, as passed to the call. In Estimator, the user provided computation, the model_fn, is wrapped inside a tf.while_loop for peak performance. The steps_per_run variable determines the number of iterations of the loop before returning to the CPU. Args: num_steps: Number of steps to execute. last_step: Step after which to stop. steps_per_run: Number of steps executed per run call. Raises: ValueError: If one of the arguments is invalid.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:num_steps arg:last_step arg:steps_per_run arguments arg arg arg arg If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call If BoolOp Compare Compare Raise Call Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "transform_non_affine",
    "source_code": "def transform_non_affine(self, values):\n    return values",
    "docstring": "Apply only the non-affine part of this transformation. `~matplotlib.transforms.Transform.input_dims~matplotlib.transforms.Transform.input_dims~matplotlib.transforms.Transform.output_dims~matplotlib.transforms.Transform.output_dims`), depending on the input.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@deprecated('`load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.', category=FutureWarning)\ndef load_state_dict(state_dict: dict[str, Any], storage_reader: StorageReader, process_group: Optional[dist.ProcessGroup]=None, coordinator_rank: int=0, no_dist: bool=False, planner: Optional[LoadPlanner]=None) -> None:\n    storage_reader.reset()\n    with _profile():\n        return _load_state_dict(state_dict, storage_reader, process_group, coordinator_rank, no_dist, planner)",
    "docstring": "This method is deprecated. Please switch to 'load'.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_loader.py",
    "ast_data": "FunctionDef name:load_state_dict arg:state_dict arg:storage_reader arg:process_group arg:coordinator_rank arg:no_dist arg:planner arguments arg arg arg arg arg arg Call With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_function",
    "source_code": "def transform_function(self, fn, user_context):\n    cache_subkey = self.get_caching_key(user_context)\n    if self._cache.has(fn, cache_subkey):\n        factory = self._cached_factory(fn, cache_subkey)\n    else:\n        with self._cache_lock:\n            if self._cache.has(fn, cache_subkey):\n                factory = self._cached_factory(fn, cache_subkey)\n            else:\n                logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey)\n                nodes, ctx = super(PyToPy, self).transform_function(fn, user_context)\n                if isinstance(nodes, gast.Lambda):\n                    nodes = gast.Assign(targets=[gast.Name(ctx.info.name, ctx=gast.Store(), annotation=None, type_comment=None)], value=nodes)\n                else:\n                    nodes.name = ctx.info.name\n                if logging.has_verbosity(2):\n                    logging.log(2, 'Transformed %s:\\n\\n%s\\n', fn, parser.unparse(nodes))\n                factory = _PythonFnFactory(ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals())\n                factory.create(nodes, ctx.namer, future_features=ctx.info.future_features)\n                self._cache[fn][cache_subkey] = factory\n    transformed_fn = factory.instantiate(globals_=fn.__globals__, closure=fn.__closure__ or (), defaults=fn.__defaults__, kwdefaults=getattr(fn, '__kwdefaults__', None))\n    return (transformed_fn, factory.module, factory.source_map)",
    "docstring": "Transforms a function. See GenericTranspiler.transform_function. This overload wraps the parent's , adding caching and facilities to instantiate the output as a Python object. It also adds facilities to make new symbols available to the generated Python code, visible as local variables - see . Args: fn: A function or lambda. user_context: An opaque object (may be None) that is forwarded to transform_ast, through the ctx.user attribute. Returns: A tuple: * A function or lambda with the same signature and closure as * The temporary module into which the transformed function was loaded * The source map as a Dict[origin_info.LineLocation, origin_info.OriginInfo]",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:transform_function arg:self arg:fn arg:user_context arguments arg arg arg Assign Call If Call Assign Call With If Call Assign Call Call Assign Call Call If Call Assign Call Call Call Assign If Call Call Call Assign Call Call Call Assign Assign Call BoolOp Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_pos_and_bbox",
    "source_code": "def get_pos_and_bbox(ax, renderer):\n    fig = ax.get_figure(root=False)\n    pos = ax.get_position(original=True)\n    pos = pos.transformed(fig.transSubfigure - fig.transFigure)\n    tightbbox = martist._get_tightbbox_for_layout_only(ax, renderer)\n    if tightbbox is None:\n        bbox = pos\n    else:\n        bbox = tightbbox.transformed(fig.transFigure.inverted())\n    return (pos, bbox)",
    "docstring": "Get the position and the bbox for the Axes. Parameters ---------- ax : renderer : subclass. Returns ------- pos : Position in figure coordinates. bbox : Tight bounding box in figure coordinates.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py",
    "ast_data": "FunctionDef name:get_pos_and_bbox arg:ax arg:renderer arguments arg arg Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "update_config_proto",
    "source_code": "def update_config_proto(self, config_proto):\n    return self._extended._update_config_proto(config_proto)",
    "docstring": "Returns a copy of modified for use with this strategy. DEPRECATED: This method is not available in TF 2.x. The updated config has something needed to run a strategy, e.g. configuration to run collective ops, or device filters to improve distributed training performance. Args: config_proto: a object. Returns: The updated copy of the .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:update_config_proto arg:self arg:config_proto arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "warmup",
    "source_code": "def warmup():\n    start('')\n    stop(save=False)",
    "docstring": "Warm-up the profiler session. The profiler session will set up profiling context, including loading CUPTI library for GPU profiling. This is used for improving the accuracy of the profiling results.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_v2.py",
    "ast_data": "FunctionDef name:warmup arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "success",
    "source_code": "@property\ndef success(self) -> bool:\n    return self.exported_program is not None",
    "docstring": "Whether the capture was successful. An exception can still be recorded even if the capture was successful. In this case the exception is informational only. For example, draft_export can record an exception if there are warnings during the export. The exceptions will go into the onnx export report when report=True.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_capture_strategies.py",
    "ast_data": "FunctionDef name:success arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "sphinx",
    "name": "MessagePrefixFilter",
    "source_code": "class MessagePrefixFilter(logging.Filter):\n\n    def __init__(self, prefix: str) -> None:\n        self.prefix = prefix\n        super().__init__()\n\n    def filter(self, record: logging.LogRecord) -> bool:\n        if self.prefix:\n            record.msg = self.prefix + ' ' + record.msg\n        return True",
    "docstring": "Prepend prefix to all log records.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\util\\logging.py",
    "ast_data": "ClassDef name:MessagePrefixFilter FunctionDef name:__init__ arg:self arg:prefix arguments arg arg Assign Call Call FunctionDef name:filter arg:self arg:record arguments arg arg If Assign Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "theme_context",
    "source_code": "@contextmanager\ndef theme_context(params: dict[str, Any]) -> Generator:\n    orig_params = {k: mpl.rcParams[k] for k in params}\n    color_codes = 'bgrmyck'\n    nice_colors = [*color_palette('deep6'), (0.15, 0.15, 0.15)]\n    orig_colors = [mpl.colors.colorConverter.colors[x] for x in color_codes]\n    try:\n        mpl.rcParams.update(params)\n        for code, color in zip(color_codes, nice_colors):\n            mpl.colors.colorConverter.colors[code] = color\n        yield\n    finally:\n        mpl.rcParams.update(orig_params)\n        for code, color in zip(color_codes, orig_colors):\n            mpl.colors.colorConverter.colors[code] = color",
    "docstring": "Temporarily modify specifc matplotlib rcParams.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:theme_context arg:params arguments arg Assign Assign Assign Call Assign Try Call For Call Assign Call For Call Assign"
  },
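A minimal standalone sketch of the same pattern (without the color-cycle handling): temporarily override rcParams and restore the originals even if the body raises. Note matplotlib ships `mpl.rc_context` for this as well; `tmp_rc` below is illustrative only:

```python
from contextlib import contextmanager
import matplotlib as mpl

@contextmanager
def tmp_rc(params):
    orig = {k: mpl.rcParams[k] for k in params}  # snapshot current values
    try:
        mpl.rcParams.update(params)
        yield
    finally:
        mpl.rcParams.update(orig)  # always restore

with tmp_rc({"axes.grid": True}):
    pass  # plotting code here sees the temporary settings
```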
  {
    "library": "pytorch",
    "name": "get",
    "source_code": "def get(self, b: bool) -> IntLikeType:\n    return cast_symbool_to_symint_guardless(b)",
    "docstring": "Get the int value from bool",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:get arg:self arg:b arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_NodeDescGenerator",
    "source_code": "class _NodeDescGenerator:\n    _lock: threading.Lock\n    _local_id: int\n\n    def __init__(self) -> None:\n        self._lock = threading.Lock()\n        self._local_id = 0\n\n    def generate(self, local_addr: Optional[str]=None) -> _NodeDesc:\n        with self._lock:\n            local_id = self._local_id\n            self._local_id += 1\n        return _NodeDesc(local_addr or socket.getfqdn(), os.getpid(), local_id)",
    "docstring": "Generate node descriptors. A node descriptor is a combination of an FQDN, a process id, and an auto- incremented integer that uniquely identifies a node in the rendezvous.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_NodeDescGenerator FunctionDef name:__init__ arg:self arguments arg Assign Call Assign FunctionDef name:generate arg:self arg:local_addr arguments arg arg With Assign Return return:yes Call BoolOp Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_path_to_datafile",
    "source_code": "@tf_export(v1=['resource_loader.get_path_to_datafile'])\ndef get_path_to_datafile(path):\n    if runfiles:\n        r = runfiles.Create()\n        new_fpath = r.Rlocation(_os.path.abspath(_os.path.join('tensorflow', path)))\n        if new_fpath is not None and _os.path.exists(new_fpath):\n            return new_fpath\n    old_filepath = _os.path.join(_os.path.dirname(_inspect.getfile(_sys._getframe(1))), path)\n    return old_filepath",
    "docstring": "Get the path to the specified file in the data dependencies. The path is relative to tensorflow/ Args: path: a string resource path relative to tensorflow/ Returns: The path to the specified file present in the data attribute of py_test or py_binary. Raises: IOError: If the path is not found, or the resource can't be opened.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\resource_loader.py",
    "ast_data": "FunctionDef name:get_path_to_datafile arg:path arguments arg If Assign Call Assign Call Call Call If BoolOp Compare Call Return return:yes Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "issues2dict",
    "source_code": "def issues2dict(issues):\n    return {i['number']: i for i in issues}",
    "docstring": "Convert a list of issues to a dict, keyed by issue number.",
    "type": "function",
    "file_path": "matplotlib\\tools\\github_stats.py",
    "ast_data": "FunctionDef name:issues2dict arg:issues arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "fast_xs",
    "source_code": "def fast_xs(self, loc):\n    raise NotImplementedError('Use series._values[loc] instead')",
    "docstring": "fast path for getting a cross-section return a view of the data",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:fast_xs arg:self arg:loc arguments arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "names",
    "source_code": "def names(self):\n    return list(self._attributes)",
    "docstring": "Return the list of attribute names. Returns ------- attrnames : list of str The attribute names.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:names arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "tosparse",
    "source_code": "def tosparse(self):\n    from scipy.sparse import diags_array\n    d = self.tobanded()\n    return diags_array([d[0], d[1], d[2], d[1], d[0]], offsets=[-2, -1, 0, 1, 2], shape=(self.n, self.n), dtype=d.dtype)",
    "docstring": "Construct the Sakurai matrix in a sparse format.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:tosparse arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "cumsum",
    "source_code": "def cumsum(self, axis: AxisInt=0, *args, **kwargs) -> SparseArray:\n    nv.validate_cumsum(args, kwargs)\n    if axis is not None and axis >= self.ndim:\n        raise ValueError(f'axis(={axis}) out of bounds')\n    if not self._null_fill_value:\n        return SparseArray(self.to_dense()).cumsum()\n    return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index, fill_value=self.fill_value)",
    "docstring": "Cumulative sum of non-NA/null values. When performing the cumulative summation, any non-NA/null values will be skipped. The resulting SparseArray will preserve the locations of NaN values, but the fill value will be regardless. Parameters ---------- axis : int or None Axis over which to perform the cumulative summation. If None, perform cumulative summation over flattened array. Returns ------- cumsum : SparseArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:cumsum arg:self arg:axis arguments arg arg arg arg Call If BoolOp Compare Compare Raise Call If Return return:yes Call Call Call Return return:yes Call Call"
  },
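A short demonstration of the NaN-preserving behavior described in the docstring:

```python
import numpy as np
import pandas as pd

arr = pd.arrays.SparseArray([1.0, np.nan, 2.0, 3.0])
# NaN positions are preserved; the running sum skips them.
print(arr.cumsum())  # [1.0, nan, 3.0, 6.0], fill_value=nan
```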
  {
    "library": "matplotlib",
    "name": "_update_label_position",
    "source_code": "def _update_label_position(self, renderer):\n    if not self._autolabelpos:\n        return\n    bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)\n    x, y = self.label.get_position()\n    if self.label_position == 'left':\n        bbox = mtransforms.Bbox.union([*bboxes, self.axes.spines.get('left', self.axes).get_window_extent()])\n        self.label.set_position((bbox.x0 - self.labelpad * self.get_figure(root=True).dpi / 72, y))\n    else:\n        bbox = mtransforms.Bbox.union([*bboxes2, self.axes.spines.get('right', self.axes).get_window_extent()])\n        self.label.set_position((bbox.x1 + self.labelpad * self.get_figure(root=True).dpi / 72, y))",
    "docstring": "Update the label position based on the bounding box enclosing all the ticklabels and axis spine",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_update_label_position arg:self arg:renderer arguments arg arg If Return return:no Assign Call Assign Call If Compare Assign Call Call Call Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "sphinx",
    "name": "_JavaScriptIndex",
    "source_code": "class _JavaScriptIndex:\n    PREFIX = 'Search.setIndex('\n    SUFFIX = ')'\n\n    def dumps(self, data: Any) -> str:\n        data_json = json.dumps(data, separators=(',', ':'), sort_keys=True)\n        return self.PREFIX + data_json + self.SUFFIX\n\n    def loads(self, s: str) -> Any:\n        data = s[len(self.PREFIX):-len(self.SUFFIX)]\n        if not data or not s.startswith(self.PREFIX) or (not s.endswith(self.SUFFIX)):\n            msg = 'invalid data'\n            raise ValueError(msg)\n        return json.loads(data)\n\n    def dump(self, data: Any, f: _WritableStream[str]) -> None:\n        f.write(self.dumps(data))\n\n    def load(self, f: _ReadableStream[str]) -> Any:\n        return self.loads(f.read())",
    "docstring": "The search index as JavaScript file that calls a function on the documentation search object to register the index.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "ClassDef name:_JavaScriptIndex Assign Assign FunctionDef name:dumps arg:self arg:data arguments arg arg Assign Call Return return:yes FunctionDef name:loads arg:self arg:s arguments arg arg Assign Call Call If BoolOp Call Call Assign Raise Call Return return:yes Call FunctionDef name:dump arg:self arg:data arg:f arguments arg arg arg Call Call FunctionDef name:load arg:self arg:f arguments arg arg Return return:yes Call Call"
  },
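A hedged round-trip sketch of the PREFIX/SUFFIX wrapping that _JavaScriptIndex performs (a standalone re-implementation, not the Sphinx API itself): the payload is plain JSON wrapped in a `Search.setIndex(...)` call.

```python
import json

PREFIX = 'Search.setIndex('
SUFFIX = ')'

def dumps(data):
    # Compact, deterministic JSON wrapped in the JS call.
    return PREFIX + json.dumps(data, separators=(',', ':'), sort_keys=True) + SUFFIX

def loads(s):
    if not (s.startswith(PREFIX) and s.endswith(SUFFIX)):
        raise ValueError('invalid data')
    return json.loads(s[len(PREFIX):-len(SUFFIX)])

assert loads(dumps({'docnames': ['index']})) == {'docnames': ['index']}
```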
  {
    "library": "tensorflow",
    "name": "_handle_weight_regularization",
    "source_code": "def _handle_weight_regularization(self, name, variable, regularizer):\n\n    def _loss_for_variable(v):\n        with backend.name_scope(name + '/Regularizer'):\n            regularization = regularizer(v)\n        return regularization\n    if base_layer_utils.is_split_variable(variable):\n        for v in variable:\n            self.add_loss(functools.partial(_loss_for_variable, v))\n    else:\n        self.add_loss(functools.partial(_loss_for_variable, variable))",
    "docstring": "Create lambdas which compute regularization losses.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_handle_weight_regularization arg:self arg:name arg:variable arg:regularizer arguments arg arg arg arg FunctionDef name:_loss_for_variable arg:v arguments arg With Call Assign Call Return return:yes If Call For Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "args",
    "source_code": "@property\ndef args(self) -> tuple[Argument, ...]:\n    return self._args",
    "docstring": "The tuple of arguments to this `Node` docstring for more information. Assignment to this property is allowed. All accounting of uses and users is updated automatically on assignment.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:args arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "parse_attribute",
    "source_code": "@classmethod\ndef parse_attribute(cls, name, attr_string):\n    attr_string_lower = attr_string.lower().strip()\n    if attr_string_lower[:len('relational')] == 'relational':\n        return cls(name)\n    else:\n        return None",
    "docstring": "Parse the attribute line if it knows how. Returns the parsed attribute, or None. For date attributes, the attribute string would be like 'date '.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:parse_attribute arg:cls arg:name arg:attr_string arguments arg arg arg Assign Call Call If Compare Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_to_bytes",
    "source_code": "def _to_bytes(s):\n    if isinstance(s, _six.text_type):\n        return s.encode('utf-8', errors='surrogateescape')\n    return s",
    "docstring": "Encode s if it is a sequence of chars.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\trt_convert.py",
    "ast_data": "FunctionDef name:_to_bytes arg:s arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_desired_reports_names",
    "source_code": "def get_desired_reports_names(self) -> set[str]:\n    return self._desired_detector_names.copy()",
    "docstring": "Returns a copy of the desired reports for viewing",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:get_desired_reports_names arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "forward",
    "source_code": "def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, tgt_is_causal: bool=False, memory_is_causal: bool=False) -> Tensor:\n    x = tgt\n    if self.norm_first:\n        x = x + self._sa_block(self.norm1(x), tgt_mask, tgt_key_padding_mask, tgt_is_causal)\n        x = x + self._mha_block(self.norm2(x), memory, memory_mask, memory_key_padding_mask, memory_is_causal)\n        x = x + self._ff_block(self.norm3(x))\n    else:\n        x = self.norm1(x + self._sa_block(x, tgt_mask, tgt_key_padding_mask, tgt_is_causal))\n        x = self.norm2(x + self._mha_block(x, memory, memory_mask, memory_key_padding_mask, memory_is_causal))\n        x = self.norm3(x + self._ff_block(x))\n    return x",
    "docstring": "Pass the inputs (and mask) through the decoder layer. Args: tgt: the sequence to the decoder layer (required). memory: the sequence from the last layer of the encoder (required). tgt_mask: the mask for the tgt sequence (optional). memory_mask: the mask for the memory sequence (optional). tgt_key_padding_mask: the mask for the tgt keys per batch (optional). memory_key_padding_mask: the mask for the memory keys per batch (optional). tgt_is_causal: If specified, applies a causal mask as `~torch.nn.Transformer`.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\transformer.py",
    "ast_data": "FunctionDef name:forward arg:self arg:tgt arg:memory arg:tgt_mask arg:memory_mask arg:tgt_key_padding_mask arg:memory_key_padding_mask arg:tgt_is_causal arg:memory_is_causal arguments arg arg arg arg arg arg arg arg arg Assign If Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load_state_dict",
    "source_code": "@override\ndef load_state_dict(self, state_dict: dict[str, Any]) -> None:\n    lr_lambdas = state_dict.pop('lr_lambdas')\n    self.__dict__.update(state_dict)\n    state_dict['lr_lambdas'] = lr_lambdas\n    for idx, fn in enumerate(lr_lambdas):\n        if fn is not None:\n            self.lr_lambdas[idx].__dict__.update(fn)",
    "docstring": "Load the scheduler's state. When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:.",
    "type": "method",
    "file_path": "pytorch\\torch\\optim\\lr_scheduler.py",
    "ast_data": "FunctionDef name:load_state_dict arg:self arg:state_dict arguments arg arg Assign Call Call Assign For Call If Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "experimental_set_strategy",
    "source_code": "@tf_export('distribute.experimental_set_strategy')\ndef experimental_set_strategy(strategy):\n    old_scope = ops.get_default_graph()._global_distribute_strategy_scope\n    if old_scope is not None:\n        old_scope.__exit__(None, None, None)\n        ops.get_default_graph()._global_distribute_strategy_scope = None\n    if has_strategy():\n        raise RuntimeError('Must not be called inside a `tf.distribute.Strategy` scope.')\n    if strategy is not None:\n        new_scope = strategy.scope()\n        new_scope.__enter__()\n        ops.get_default_graph()._global_distribute_strategy_scope = new_scope",
    "docstring": "Set a as current without . is equivalent to: In general, you should use the API, but this alternative may be convenient in notebooks where you would have to put each cell in a block. Note: This should only be called outside of any TensorFlow scope to avoid improper nesting. Args: strategy: A object or None. Raises: RuntimeError: If called inside a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:experimental_set_strategy arg:strategy arguments arg Assign Call If Compare Call Assign Call If Call Raise Call If Compare Assign Call Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_colorizer_check_keywords",
    "source_code": "def _set_colorizer_check_keywords(self, colorizer, **kwargs):\n    self._check_exclusionary_keywords(colorizer, **kwargs)\n    self.colorizer = colorizer",
    "docstring": "Raises a ValueError if any kwarg is not None while colorizer is not None.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:_set_colorizer_check_keywords arg:self arg:colorizer arguments arg arg arg Call Assign"
  },
  {
    "library": "pandas",
    "name": "lazy_load_stub_copy",
    "source_code": "def lazy_load_stub_copy(text):\n    global copy, paste\n    copy, paste = determine_clipboard()\n    return copy(text)",
    "docstring": "A stub function for copy(), which will load the real copy() function when called so that the real copy() function is used for later calls. This allows users to import pyperclip without having determine_clipboard() automatically run, which will automatically select a clipboard mechanism. This could be a problem if it selects, say, the memory-heavy PyQt4 module but the user was just going to immediately call set_clipboard() to use a different clipboard mechanism. The lazy loading this stub function implements gives the user a chance to call set_clipboard() to pick another clipboard mechanism. Or, if the user simply calls copy() or paste() without calling set_clipboard() first, will fall back on whatever clipboard mechanism that determine_clipboard() automatically chooses.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py",
    "ast_data": "FunctionDef name:lazy_load_stub_copy arg:text arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_get_row_ranges",
    "source_code": "def _get_row_ranges(self, rows, col_slice):\n    j_start, j_stop, j_stride = col_slice.indices(self.shape[1])\n    col_range = range(j_start, j_stop, j_stride)\n    nj = len(col_range)\n    new = self._lil_container((len(rows), nj), dtype=self.dtype)\n    _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1], self.rows, self.data, new.rows, new.data, rows, j_start, j_stop, j_stride, nj)\n    return new",
    "docstring": "Fast path for indexing in the case where column index is slice. This gains performance improvement over brute force by more efficient skipping of zeros, by accessing the elements column-wise in order. Parameters ---------- rows : sequence or range Rows indexed. If range, must be within valid bounds. col_slice : slice Columns indexed",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\_lil.py",
    "ast_data": "FunctionDef name:_get_row_ranges arg:self arg:rows arg:col_slice arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "classes_",
    "source_code": "@property\ndef classes_(self):\n    _search_estimator_has('classes_')(self)\n    return self.best_estimator_.classes_",
    "docstring": "Class labels. Only available when and the estimator is a classifier.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:classes_ arg:self arguments arg Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_store_line",
    "source_code": "def _get_store_line(self, value: Union[str, CppCSEVariable], var: str, index: sympy.Expr, dtype: torch.dtype, accu_store: bool=False):\n    assert isinstance(value, str) or (isinstance(value, CppCSEVariable) and value.is_vec), value\n    tiling_var = self.itervars[self.tiling_idx]\n    var_expr = f'{var} + {cexpr_index(index)}'\n    stride = self._try_get_const_stride(index, tiling_var)\n    code = IndentedBuffer()\n    if stride == 1:\n        if accu_store:\n            load = f'{self._get_vec_type(dtype)}::loadu({var_expr})' if dtype == torch.float and self.tail_size is None else f'{self._get_vec_type(dtype)}::loadu({var_expr}, {cexpr_index(self.num_elems)})'\n            value = f'({value} + {load})'\n        if dtype == torch.float and self.tail_size is None:\n            code.writeline(f'{value}.store({var_expr});')\n        else:\n            code.writeline(f'{value}.store({var_expr}, {cexpr_index(self.num_elems)});')\n    else:\n        self._load_or_store_non_contiguous(var, index, dtype, buffer=code, store_value=value, accu_store=accu_store)\n    return code",
    "docstring": "Get a store line buffer that stores into at of . It handles both contiguous and non-contiguous store cases. :param value: Vectorized type templaterized on . :param var: buffer to store into. :index: index into the .",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:_get_store_line arg:self arg:value arg:var arg:index arg:dtype arg:accu_store arguments arg arg arg arg arg arg BoolOp Call BoolOp Call Assign Assign Call Assign Call Assign Call If Compare If Assign BoolOp Compare Compare Call Call Call Assign If BoolOp Compare Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_factorialx_array_exact",
    "source_code": "def _factorialx_array_exact(n, k=1):\n    un = np.unique(n)\n    un = un[~np.isnan(un)]\n    if np.isnan(n).any():\n        dt = float\n    elif k in _FACTORIALK_LIMITS_64BITS.keys():\n        if un[-1] > _FACTORIALK_LIMITS_64BITS[k]:\n            dt = object\n        elif un[-1] > _FACTORIALK_LIMITS_32BITS[k]:\n            dt = np.int64\n        else:\n            dt = np.dtype('long')\n    else:\n        dt = object\n    out = np.empty_like(n, dtype=dt)\n    un = un[un > 1]\n    out[n < 2] = 1\n    out[n < 0] = 0\n    for lane in range(0, k):\n        ul = un[un % k == lane] if k > 1 else un\n        if ul.size:\n            val = _range_prod(1, int(ul[0]), k=k)\n            out[n == ul[0]] = val\n            for i in range(len(ul) - 1):\n                prev = ul[i]\n                current = ul[i + 1]\n                val *= _range_prod(int(prev + 1), int(current), k=k)\n                out[n == current] = val\n    if np.isnan(n).any():\n        out = out.astype(np.float64)\n        out[np.isnan(n)] = np.nan\n    return out",
    "docstring": "Exact computation of factorial for an array. The factorials are computed in incremental fashion, by taking the sorted unique values of n and multiplying the intervening numbers between the different unique values. In other words, the factorial for the largest input is only computed once, with each other result computed in the process. k > 1 corresponds to the multifactorial.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:_factorialx_array_exact arg:n arg:k arguments arg arg Assign Call Assign Call If Call Call Assign If Compare Call If Compare Assign If Compare Assign Assign Call Assign Assign Call Assign Compare Assign Compare Assign Compare For Call Assign Compare Compare If Assign Call Call Assign Compare For Call Call Assign Assign Call Call Call Assign Compare If Call Call Assign Call Assign Call Return return:yes"
  },
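A minimal sketch of the incremental strategy the docstring describes (k=1 only, no NaN or dtype handling): sort the unique inputs and extend one running product between them, so the largest factorial is only computed once.

```python
import numpy as np

def incremental_factorial(n):
    n = np.asarray(n)
    out = np.ones_like(n, dtype=object)  # object dtype avoids overflow
    out[n < 0] = 0
    val, prev = 1, 1
    for u in np.unique(n[n > 1]):
        # Extend the running product only over the gap since the last value.
        for m in range(prev + 1, int(u) + 1):
            val *= m
        prev = int(u)
        out[n == u] = val
    return out

print(incremental_factorial([5, 3, 5, 7]))  # [120 6 120 5040]
```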
  {
    "library": "django",
    "name": "assemble_as_sql",
    "source_code": "def assemble_as_sql(self, fields, value_rows):\n    if not value_rows:\n        return ([], [])\n    get_placeholders = [getattr(field, 'get_placeholder', None) for field in fields]\n    rows_of_fields_as_sql = ((self.field_as_sql(field, get_placeholder, value) for field, get_placeholder, value in zip(fields, get_placeholders, row)) for row in value_rows)\n    sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)\n    placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)\n    param_rows = [[p for ps in row for p in ps] for row in param_rows]\n    return (placeholder_rows, param_rows)",
    "docstring": "Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\compiler.py",
    "ast_data": "FunctionDef name:assemble_as_sql arg:self arg:fields arg:value_rows arguments arg arg arg If Return return:no Assign Call Assign Call Call Assign Call Assign Call Assign Return return:yes"
  },
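A standalone sketch of the zip-based transposition at the heart of assemble_as_sql, using hypothetical (sql, params) pairs in place of real Django fields:

```python
rows_of_fields_as_sql = [
    [('%s', [1]), ('%s', ['a'])],  # row 1: one (sql, params) pair per field
    [('%s', [2]), ('%s', ['b'])],  # row 2
]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
param_rows = [[p for ps in row for p in ps] for row in param_rows]
print(placeholder_rows)  # (('%s', '%s'), ('%s', '%s'))
print(param_rows)        # [[1, 'a'], [2, 'b']]
```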
  {
    "library": "numpy",
    "name": "_ensure_ndmin_ndarray_check_param",
    "source_code": "def _ensure_ndmin_ndarray_check_param(ndmin):\n    if ndmin not in [0, 1, 2]:\n        raise ValueError(f'Illegal value of ndmin keyword: {ndmin}')",
    "docstring": "Just checks if the param ndmin is supported on _ensure_ndmin_ndarray. It is intended to be used as verification before running anything expensive. e.g. loadtxt, genfromtxt",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_npyio_impl.py",
    "ast_data": "FunctionDef name:_ensure_ndmin_ndarray_check_param arg:ndmin arguments arg If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "name_based_save",
    "source_code": "@tf_export('experimental.dtensor.name_based_save', v1=[])\ndef name_based_save(mesh: layout_lib.Mesh, checkpoint_prefix: Union[str, tensor_lib.Tensor], name_tensor_dict: Dict[str, Union[tensor_lib.Tensor, tf_variables.Variable]]):\n    if not context.executing_eagerly():\n        raise ValueError('name based save must run eagerly.')\n    ordered_name_tensor_dict = name_tensor_dict\n    if not isinstance(name_tensor_dict, collections.OrderedDict):\n        ordered_name_tensor_dict = collections.OrderedDict(name_tensor_dict)\n    checkpoint_prefix = api.pack([checkpoint_prefix] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=0))\n    tensor_names = api.pack([list(ordered_name_tensor_dict.keys())] * mesh.num_local_devices(), layout_lib.Layout.replicated(mesh.host_mesh(), rank=1))\n    sharded_save(mesh, file_prefix=checkpoint_prefix, tensor_names=tensor_names, shape_and_slices=[''] * len(ordered_name_tensor_dict), tensors=list(ordered_name_tensor_dict.values()))",
    "docstring": "Saves name based Tensor into a Checkpoint. The function prepares the input dictionary to the format of a , so that it can take advantage of DTensor SPMD based distributed save. Same as restore, the function only supports saving on the single mesh. Args: mesh: The single mesh that all Tensors would be restored to. checkpoint_prefix : The prefix of checkpoint to be restored. name_tensor_dict: A ordered dictionary of tensor_names to a DTensor. The DTensor shape/dtype must match the tensors being saved/restored for now.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\save_restore.py",
    "ast_data": "FunctionDef name:name_based_save arg:mesh arg:checkpoint_prefix arg:name_tensor_dict arguments arg arg arg If Call Raise Call Assign If Call Assign Call Assign Call Call Call Call Assign Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_init_func",
    "source_code": "def _make_init_func(self, init_func):\n    self._init_func = structured_function.StructuredFunctionWrapper(init_func, self._transformation_name(), input_structure=tensor_spec.TensorSpec([], dtypes.int64))",
    "docstring": "Make wrapping defun for init_func.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\grouping.py",
    "ast_data": "FunctionDef name:_make_init_func arg:self arg:init_func arguments arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "validate_callbacks",
    "source_code": "def validate_callbacks(input_callbacks, optimizer):\n    if input_callbacks:\n        for callback in input_callbacks:\n            if isinstance(callback, (callbacks.LearningRateScheduler, callbacks.ReduceLROnPlateau)):\n                if not isinstance(optimizer, optimizer_v2.OptimizerV2):\n                    raise ValueError('You must specify a Keras Optimizer V2 when using %s callback with DistributionStrategy.' % callback)\n            if isinstance(callback, callbacks.TensorBoard):\n                if getattr(callback, 'write_grads', False):\n                    logging.warning(UserWarning('`write_grads` in the TensorBoard callback is not supported when using DistributionStrategy. Setting `write_grads` to `False`.'))\n                    callback.write_grads = False",
    "docstring": "Validate whether given callbacks are supported by DistributionStrategy. Args: input_callbacks: List of callbacks passed by the user to fit. optimizer: Optimizer instance used to train the model. Raises: ValueError: If or is one of the callbacks passed. ValueError: If is one of the parameters passed as part of the TensorBoard callback.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:validate_callbacks arg:input_callbacks arg:optimizer arguments arg arg If For If Call If Call Raise Call If Call If Call Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "profiler",
    "source_code": "@property\ndef profiler(self):\n    if not self._enabled:\n        return None\n    if not self._profiler:\n        self._profiler = model_analyzer.Profiler(ops.get_default_graph())\n    return self._profiler",
    "docstring": "Returns the current profiler object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\profile_context.py",
    "ast_data": "FunctionDef name:profiler arg:self arguments arg If Return return:no If Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "AutoFieldMeta",
    "source_code": "class AutoFieldMeta(type):\n\n    @property\n    def _subclasses(self):\n        return (BigAutoField, SmallAutoField)\n\n    def __instancecheck__(self, instance):\n        return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)\n\n    def __subclasscheck__(self, subclass):\n        return issubclass(subclass, self._subclasses) or super().__subclasscheck__(subclass)",
    "docstring": "Metaclass to maintain backward inheritance compatibility for AutoField. It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields.",
    "type": "class",
    "file_path": "django\\django\\db\\models\\fields\\__init__.py",
    "ast_data": "ClassDef name:AutoFieldMeta FunctionDef name:_subclasses arg:self arguments arg Return return:yes FunctionDef name:__instancecheck__ arg:self arg:instance arguments arg arg Return return:yes BoolOp Call Call Call FunctionDef name:__subclasscheck__ arg:self arg:subclass arguments arg arg Return return:yes BoolOp Call Call Call"
  },
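The metaclass makes subclass and instance checks transparent, as a quick check shows (assumes Django is installed; the subclass checks themselves need no configured settings):

```python
from django.db.models import AutoField, BigAutoField, SmallAutoField

print(issubclass(BigAutoField, AutoField))    # True, via __subclasscheck__
print(issubclass(SmallAutoField, AutoField))  # True
```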
  {
    "library": "sphinx",
    "name": "split",
    "source_code": "def split(self, input: str) -> list[str]:\n    return self._word_re.findall(input)",
    "docstring": "This method splits a sentence into words. Default splitter splits input at white spaces, which should be enough for most languages except CJK languages.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\search\\__init__.py",
    "ast_data": "FunctionDef name:split arg:self arg:input arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "should_record_summaries",
    "source_code": "@tf_export('summary.should_record_summaries', v1=[])\ndef should_record_summaries():\n    return _should_record_summaries_internal(default_state=True)",
    "docstring": "Returns boolean Tensor which is True if summaries will be recorded. If no default summary writer is currently registered, this always returns False. Otherwise, this reflects the recording condition has been set via (except that it may return False for some replicas when using ). If no recording condition is active, it defaults to True.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:should_record_summaries arguments Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "_parse_tables",
    "source_code": "def _parse_tables(self, document, match, attrs):\n    raise AbstractMethodError(self)",
    "docstring": "Return all tables from the parsed DOM. Parameters ---------- document : the DOM from which to parse the table element. match : str or regular expression The text to search for in the DOM tree. attrs : dict A dictionary of table attributes that can be used to disambiguate multiple tables on a page. Raises ------ ValueError : does not match any text in the document. Returns ------- list of node-like HTML elements to be parsed into raw data.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\html.py",
    "ast_data": "FunctionDef name:_parse_tables arg:self arg:document arg:match arg:attrs arguments arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_global_plan",
    "source_code": "def prepare_global_plan(self, global_plan: list[LoadPlan]) -> list[LoadPlan]:\n    return global_plan",
    "docstring": "Implementation of the StorageReader method",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\format_utils.py",
    "ast_data": "FunctionDef name:prepare_global_plan arg:self arg:global_plan arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_prune_invalid_weights",
    "source_code": "def _prune_invalid_weights(sparse_ids, sparse_weights):\n    if sparse_weights is not None:\n        is_weights_valid = math_ops.greater(sparse_weights.values, 0)\n        sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)\n        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)\n    return (sparse_ids, sparse_weights)",
    "docstring": "Prune invalid weights (< 0) from the input ids and weights.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:_prune_invalid_weights arg:sparse_ids arg:sparse_weights arguments arg arg If Compare Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_enabled",
    "source_code": "def is_enabled(self):\n    if self.is_flag_on(FLAG_NAME_ENABLE):\n        logging.debug('Tensor Tracer is enabled with flags %s.', self._env.get(FLAGS_ENV_VAR))\n        return True\n    else:\n        return False",
    "docstring": "Returns True if TensorTracer is enabled.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:is_enabled arg:self arguments arg If Call Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_active",
    "source_code": "@classmethod\ndef set_active(cls, manager):\n    cls.figs[manager.num] = manager\n    cls.figs.move_to_end(manager.num)",
    "docstring": "Make *manager* the active manager.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py",
    "ast_data": "FunctionDef name:set_active arg:cls arg:manager arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "logits_to_probs",
    "source_code": "def logits_to_probs(logits, is_binary=False):\n    if is_binary:\n        return torch.sigmoid(logits)\n    return F.softmax(logits, dim=-1)",
    "docstring": "Converts a tensor of logits into probabilities. Note that for the binary case, each value denotes log odds, whereas for the multi-dimensional case, the values along the last dimension denote the log probabilities (possibly unnormalized) of the events.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributions\\utils.py",
    "ast_data": "FunctionDef name:logits_to_probs arg:logits arg:is_binary arguments arg arg If Return return:yes Call Return return:yes Call"
  },
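A quick check of both branches (assumes torch is installed): sigmoid for binary log-odds, softmax over the last dimension otherwise.

```python
import torch
from torch.distributions.utils import logits_to_probs

binary = logits_to_probs(torch.tensor([0.0, 2.0]), is_binary=True)
multi = logits_to_probs(torch.tensor([[1.0, 2.0, 3.0]]))
print(binary)             # tensor([0.5000, 0.8808])
print(multi.sum(dim=-1))  # tensor([1.]) -- rows are normalized
```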
  {
    "library": "tensorflow",
    "name": "preferred_device_type",
    "source_code": "@tf_export('experimental.dtensor.preferred_device_type', v1=[])\ndef preferred_device_type() -> str:\n    if is_tpu_present():\n        return 'TPU'\n    elif is_gpu_present():\n        return 'GPU'\n    return 'CPU'",
    "docstring": "Returns the preferred device type for the accelerators. The returned device type is determined by checking the first present device type from all supported device types in the order of 'TPU', 'GPU', 'CPU'.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:preferred_device_type arguments If Call Return return:yes If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "format_data",
    "source_code": "def format_data(self, value):\n    return self.__call__(value)",
    "docstring": "Return the full string representation of the value with the position unspecified.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:format_data arg:self arg:value arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "increase_by",
    "source_code": "def increase_by(self, value):\n    pywrap_tfe.TFE_MonitoringCounterCellIncrementBy(self._cell, value)",
    "docstring": "Atomically increments the value. Args: value: non-negative value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:increase_by arg:self arg:value arguments arg arg Call"
  },
  {
    "library": "seaborn",
    "name": "figure",
    "source_code": "@property\ndef figure(self):\n    return self._figure",
    "docstring": "Access the :class: object underlying the grid.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:figure arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_create_block_3_diagonal_matrix",
    "source_code": "def _create_block_3_diagonal_matrix(A, B, d):\n    ind = np.arange(3)\n    ind_blocks = np.arange(len(A))\n    A_i = np.empty_like(A, dtype=int)\n    A_i[:] = ind[:, None]\n    A_i += 3 * (1 + ind_blocks[:, None, None])\n    A_j = np.empty_like(A, dtype=int)\n    A_j[:] = ind\n    A_j += 3 * ind_blocks[:, None, None]\n    B_i = np.empty_like(B, dtype=int)\n    B_i[:] = ind[:, None]\n    B_i += 3 * ind_blocks[:, None, None]\n    B_j = np.empty_like(B, dtype=int)\n    B_j[:] = ind\n    B_j += 3 * (1 + ind_blocks[:, None, None])\n    diag_i = diag_j = np.arange(3 * len(d))\n    i = np.hstack((A_i.ravel(), B_i.ravel(), diag_i))\n    j = np.hstack((A_j.ravel(), B_j.ravel(), diag_j))\n    values = np.hstack((A.ravel(), B.ravel(), np.repeat(d, 3)))\n    u = 5\n    l = 5\n    result = np.zeros((u + l + 1, 3 * len(d)))\n    result[u + i - j, j] = values\n    return result",
    "docstring": "Create a 3-diagonal block matrix as banded. The matrix has the following structure: DB... ADB.. .ADB. ..ADB ...AD The blocks A, B and D are 3-by-3 matrices. The D matrices has the form d * I. Parameters ---------- A : ndarray, shape (n, 3, 3) Stack of A blocks. B : ndarray, shape (n, 3, 3) Stack of B blocks. d : ndarray, shape (n + 1,) Values for diagonal blocks. Returns ------- ndarray, shape (11, 3 * (n + 1)) Matrix in the banded form as used by .",
    "type": "function",
    "file_path": "scipy\\scipy\\spatial\\transform\\_rotation_spline.py",
    "ast_data": "FunctionDef name:_create_block_3_diagonal_matrix arg:A arg:B arg:d arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Call Assign Call Call Call Assign Call Call Call Assign Call Call Call Call Assign Assign Assign Call Call Assign Return return:yes"
  },
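A minimal demonstration of the banded assignment used above: entry (i, j) of a dense matrix lands at row u + i - j of the banded layout, which is the convention `scipy.linalg.solve_banded` expects.

```python
import numpy as np

u = l = 1  # one upper and one lower diagonal
dense = np.array([[1., 2., 0.],
                  [3., 4., 5.],
                  [0., 6., 7.]])
i, j = np.nonzero(dense)
banded = np.zeros((u + l + 1, 3))
banded[u + i - j, j] = dense[i, j]
print(banded)
# [[0. 2. 5.]   upper diagonal
#  [1. 4. 7.]   main diagonal
#  [3. 6. 0.]]  lower diagonal
```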
  {
    "library": "tensorflow",
    "name": "allocator",
    "source_code": "@property\ndef allocator(self) -> str:\n    return self._allocator",
    "docstring": "Name of the allocator used to create this tensor (string).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:allocator arg:self arguments arg Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "_check_empty",
    "source_code": "def _check_empty(data: utils.Buffer) -> None:\n    if data:\n        raise ValueError('Corrupt data: unparsed data')",
    "docstring": "All data should have been parsed.",
    "type": "function",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:_check_empty arg:data arguments arg If Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "to_polygons",
    "source_code": "def to_polygons(self, transform=None, width=0, height=0, closed_only=True):\n    if len(self.vertices) == 0:\n        return []\n    if transform is not None:\n        transform = transform.frozen()\n    if self.codes is None and (width == 0 or height == 0):\n        vertices = self.vertices\n        if closed_only:\n            if len(vertices) < 3:\n                return []\n            elif np.any(vertices[0] != vertices[-1]):\n                vertices = [*vertices, vertices[0]]\n        if transform is None:\n            return [vertices]\n        else:\n            return [transform.transform(vertices)]\n    return _path.convert_path_to_polygons(self, transform, width, height, closed_only)",
    "docstring": "Convert this path to a list of polygons or polylines. Each polygon/polyline is an (N, 2) array of vertices. In other words, each polygon has no instructions or curves. This is useful for displaying in backends that do not support compound paths or Bézier curves. If *width* and *height* are both non-zero then the lines will be simplified so that vertices outside of (0, 0), (width, height) will be clipped. The resulting polygons will be simplified if the :attr: attribute of the path is . If *closed_only* is (default), only closed polygons, with the last point being the same as the first point, will be returned. Any unclosed polylines in the path will be explicitly closed. If *closed_only* is , any unclosed polygons in the path will be returned as unclosed polygons, and the closed polygons will be returned explicitly closed by setting the last point to the same as the first point.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:to_polygons arg:self arg:transform arg:width arg:height arg:closed_only arguments arg arg arg arg arg If Compare Call Return return:no If Compare Assign Call If BoolOp Compare BoolOp Compare Compare Assign If If Compare Call Return return:no If Call Compare Assign If Compare Return return:yes Return return:yes Call Return return:yes Call"
  },
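A small usage sketch (assumes matplotlib is installed): a closed rectangle path converts to a single polygon vertex array.

```python
from matplotlib.path import Path

rect = Path.unit_rectangle()
polys = rect.to_polygons()
print(len(polys))      # 1
print(polys[0].shape)  # (N, 2); closed, last vertex equals the first
```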
  {
    "library": "tensorflow",
    "name": "parameters",
    "source_code": "@property\ndef parameters(self) -> Mapping[str, Any]:\n    return super().parameters",
    "docstring": "Returns an ordered mapping of parameter name to specification.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "build",
    "source_code": "def build(self, y_pred, y_true):\n    super(MetricsContainer, self).build(y_pred)\n    self._metrics = self._maybe_broadcast_to_outputs(y_pred, self._metrics)\n    self._metrics = self._conform_to_outputs(y_pred, self._metrics)\n    self._weighted_metrics = self._maybe_broadcast_to_outputs(y_pred, self._weighted_metrics)\n    self._weighted_metrics = self._conform_to_outputs(y_pred, self._weighted_metrics)\n    y_pred = nest.list_to_tuple(y_pred)\n    y_true = nest.list_to_tuple(y_true)\n    self._metrics = nest.list_to_tuple(self._metrics)\n    self._weighted_metrics = nest.list_to_tuple(self._weighted_metrics)\n    self._metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects, self._metrics, y_true, y_pred)\n    self._weighted_metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects, self._weighted_metrics, y_true, y_pred)\n    self._metrics = nest.flatten_up_to(y_pred, self._metrics, check_types=False)\n    self._weighted_metrics = nest.flatten_up_to(y_pred, self._weighted_metrics, check_types=False)\n    if not self._from_serialized:\n        self._set_metric_names()\n    self._create_ordered_metrics()\n    self._built = True",
    "docstring": "One-time setup of metric objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py",
    "ast_data": "FunctionDef name:build arg:self arg:y_pred arg:y_true arguments arg arg arg Call Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call If Call Call Assign"
  },
  {
    "library": "sphinx",
    "name": "fetch_rvalue",
    "source_code": "def fetch_rvalue(self) -> list[Token]:\n    tokens = []\n    while (current := self.fetch_token()):\n        tokens.append(current)\n        if current == [OP, '(']:\n            tokens += self.fetch_until([OP, ')'])\n        elif current == [OP, '{']:\n            tokens += self.fetch_until([OP, '}'])\n        elif current == [OP, '[']:\n            tokens += self.fetch_until([OP, ']'])\n        elif current == INDENT:\n            tokens += self.fetch_until(DEDENT)\n        elif current == [OP, ';']:\n            break\n        elif current and current.kind not in {OP, NAME, NUMBER, STRING}:\n            break\n    return tokens",
    "docstring": "Fetch right-hand value of assignment.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\pycode\\parser.py",
    "ast_data": "FunctionDef name:fetch_rvalue arg:self arguments arg Assign While Call Call If Compare Call If Compare Call If Compare Call If Compare Call If Compare If BoolOp Compare Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_apply",
    "source_code": "def _apply(self, func: Callable[..., Any], name: str, numeric_only: bool=False, numba_args: tuple[Any, ...]=(), **kwargs):\n    window_indexer = self._get_window_indexer()\n    min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size\n\n    def homogeneous_func(values: np.ndarray):\n        if values.size == 0:\n            return values.copy()\n\n        def calc(x):\n            start, end = window_indexer.get_window_bounds(num_values=len(x), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step)\n            self._check_window_bounds(start, end, len(x))\n            return func(x, start, end, min_periods, *numba_args)\n        with np.errstate(all='ignore'):\n            result = calc(values)\n        return result\n    if self.method == 'single':\n        return self._apply_columnwise(homogeneous_func, name, numeric_only)\n    else:\n        return self._apply_tablewise(homogeneous_func, name, numeric_only)",
    "docstring": "Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : callable function to apply name : str, numba_args : tuple args to be passed when func is a numba func **kwargs additional arguments for rolling function and window function Returns ------- y : type of input",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:_apply arg:self arg:func arg:name arg:numeric_only arg:numba_args arguments arg arg arg arg arg arg Assign Call Assign Compare FunctionDef name:homogeneous_func arg:values arguments arg If Compare Return return:yes Call FunctionDef name:calc arg:x arguments arg Assign Call Call Call Call Return return:yes Call With Call Assign Call Return return:yes If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "may_depend_on",
    "source_code": "def may_depend_on(a: Node, b: Node, search_depth: int=6):\n    if a == b:\n        return True\n    if len(a.all_input_nodes) == 0:\n        return False\n    if search_depth == 0:\n        return True\n    for inp in a.all_input_nodes:\n        if may_depend_on(inp, b, search_depth - 1):\n            return True\n    return False",
    "docstring": "Determine if one node depends on another in a torch.fx.Graph. Arguments: a: The node that may have a dependency on b. b: The node that a may have a dependency on. search_depth: In the case of an indirect dependency, this function searches upto this many nodes away in search of a data dependency. If none is found, the function makes the conservative assumption that there is a dependency. Returns: True if a may depend on b, False if it definitely does not.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\merge_matmul.py",
    "ast_data": "FunctionDef name:may_depend_on arg:a arg:b arg:search_depth arguments arg arg arg If Compare Return return:yes If Compare Call Return return:yes If Compare Return return:yes For If Call Return return:yes Return return:yes"
  },
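A tiny torch.fx graph to exercise the search (assumes torch is installed): `c` consumes `b`, which consumes the placeholder `a`.

```python
from torch.fx import symbolic_trace
from torch.fx.experimental.merge_matmul import may_depend_on

def f(a):
    b = a + 1
    c = b * 2
    return c

nodes = list(symbolic_trace(f).graph.nodes)
a, b, c = nodes[0], nodes[1], nodes[2]  # placeholder, add, mul
print(may_depend_on(c, a))  # True: c -> b -> a
print(may_depend_on(a, c))  # False: a has no inputs
```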
  {
    "library": "tensorflow",
    "name": "load",
    "source_code": "def load(self) -> RepresentativeDatasetMapping:\n    raise NotImplementedError('Method \"load\" is not implemented.')",
    "docstring": "Loads the representative datasets. Returns: representative dataset mapping: A loaded signature def key -> representative mapping.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py",
    "ast_data": "FunctionDef name:load arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "tensor_shape_proto",
    "source_code": "def tensor_shape_proto(outputsize):\n    return TensorShapeProto(dim=[TensorShapeProto.Dim(size=d) for d in outputsize])",
    "docstring": "Create an object matching a tensor_shape field. Follows .",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_proto_graph.py",
    "ast_data": "FunctionDef name:tensor_shape_proto arg:outputsize arguments arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_datetime_to_pdf",
    "source_code": "def _datetime_to_pdf(d):\n    r = d.strftime('D:%Y%m%d%H%M%S')\n    z = d.utcoffset()\n    if z is not None:\n        z = z.seconds\n    elif time.daylight:\n        z = time.altzone\n    else:\n        z = time.timezone\n    if z == 0:\n        r += 'Z'\n    elif z < 0:\n        r += \"+%02d'%02d'\" % (-z // 3600, -z % 3600)\n    else:\n        r += \"-%02d'%02d'\" % (z // 3600, z % 3600)\n    return r",
    "docstring": "Convert a datetime to a PDF string representing it. Used for PDF and PGF.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py",
    "ast_data": "FunctionDef name:_datetime_to_pdf arg:d arguments arg Assign Call Assign Call If Compare Assign If Assign Assign If Compare If Compare Return return:yes"
  },
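Replicating the branch taken for a timezone-aware datetime with a positive UTC offset, to make the output format concrete (a standalone sketch, not a call into the private helper):

```python
from datetime import datetime, timedelta, timezone

d = datetime(2024, 1, 2, 3, 4, 5, tzinfo=timezone(timedelta(hours=2)))
r = d.strftime('D:%Y%m%d%H%M%S')
z = d.utcoffset().seconds  # 7200; timedelta.seconds is never negative
r += "-%02d'%02d'" % (z // 3600, z % 3600)  # the final else branch above
print(r)  # D:20240102030405-02'00'
```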
  {
    "library": "tensorflow",
    "name": "_apply_unary_to_chunks",
    "source_code": "def _apply_unary_to_chunks(f, chunks_by_dev):\n    output = []\n    for x in chunks_by_dev:\n        with ops.colocate_with(x[0]):\n            output.append([f(t) for t in x])\n    return output",
    "docstring": "Apply a unary op to each tensor in chunks_by_dev, on same device. Args: f: a unary function over . chunks_by_dev: list of lists of . Returns: new list of lists of with the same structure as chunks_by_dev containing the derived tensors.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:_apply_unary_to_chunks arg:f arg:chunks_by_dev arguments arg arg Assign For With Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    self.components_ = None\n    self.n_samples_seen_ = 0\n    self.mean_ = 0.0\n    self.var_ = 0.0\n    self.singular_values_ = None\n    self.explained_variance_ = None\n    self.explained_variance_ratio_ = None\n    self.noise_variance_ = None\n    X = validate_data(self, X, accept_sparse=['csr', 'csc', 'lil'], copy=self.copy, dtype=[np.float64, np.float32], force_writeable=True)\n    n_samples, n_features = X.shape\n    if self.batch_size is None:\n        self.batch_size_ = 5 * n_features\n    else:\n        self.batch_size_ = self.batch_size\n    for batch in gen_batches(n_samples, self.batch_size_, min_batch_size=self.n_components or 0):\n        X_batch = X[batch]\n        if sparse.issparse(X_batch):\n            X_batch = X_batch.toarray()\n        self.partial_fit(X_batch, check_input=False)\n    return self",
    "docstring": "Fit the model with X, using minibatches of size batch_size. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_incremental_pca.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Call Assign If Compare Assign Assign For Call BoolOp Assign If Call Assign Call Call Return return:yes Call"
  },
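A usage sketch for the batched fit above (assumes scikit-learn is installed); X is consumed in minibatches of batch_size via partial_fit.

```python
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 10))
ipca = IncrementalPCA(n_components=3, batch_size=50)
ipca.fit(X)  # four batches of 50 samples
print(ipca.components_.shape)  # (3, 10)
```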
  {
    "library": "scipy",
    "name": "__call__",
    "source_code": "def __call__(self, x, nu=0, extrapolate=None):\n    if extrapolate is None:\n        extrapolate = self.extrapolate\n    x = np.asarray(x)\n    x_shape, x_ndim = (x.shape, x.ndim)\n    x = np.ascontiguousarray(x.ravel(), dtype=np.float64)\n    if extrapolate == 'periodic':\n        n = self.t.size - self.k - 1\n        x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] - self.t[self.k])\n        extrapolate = False\n    self._ensure_c_contiguous()\n    is_complex = self.c.dtype.kind == 'c'\n    if is_complex:\n        cc = self.c.view(float)\n        if self.c.ndim == 1:\n            cc = cc.reshape(self.c.shape[0], 2)\n    else:\n        cc = self.c\n    cc = cc.reshape(cc.shape[0], -1)\n    out = _dierckx.evaluate_spline(self.t, cc, self.k, x, nu, extrapolate)\n    if is_complex:\n        out = out.view(complex)\n    out = out.reshape(x_shape + self.c.shape[1:])\n    if self.axis != 0:\n        l = list(range(out.ndim))\n        l = l[x_ndim:x_ndim + self.axis] + l[:x_ndim] + l[x_ndim + self.axis:]\n        out = out.transpose(l)\n    return out",
    "docstring": "Evaluate a spline function. Parameters ---------- x : array_like points to evaluate the spline at. nu : int, optional derivative to evaluate (default is 0). extrapolate : bool or 'periodic', optional whether to extrapolate based on the first and last intervals or return nans. If 'periodic', periodic extrapolation is used. Default is . Returns ------- y : array_like Shape is determined by replacing the interpolation axis in the coefficient array with the shape of .",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:nu arg:extrapolate arguments arg arg arg arg If Compare Assign Assign Call Assign Assign Call Call If Compare Assign Assign Assign Call Assign Compare If Assign Call If Compare Assign Call Assign Assign Call Assign Call If Assign Call Assign Call If Compare Assign Call Call Assign Assign Call Return return:yes"
  },
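Evaluating a cubic BSpline and its first derivative through __call__ (assumes scipy is installed):

```python
import numpy as np
from scipy.interpolate import BSpline

t = [0, 0, 0, 0, 1, 2, 3, 3, 3, 3]  # knot vector for k=3
c = np.arange(6, dtype=float)       # len(t) - k - 1 = 6 coefficients
spl = BSpline(t, c, 3)
x = np.linspace(0, 3, 5)
print(spl(x))        # spline values
print(spl(x, nu=1))  # first derivative via the nu argument
```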
  {
    "library": "pytorch",
    "name": "attach_out_of_memory_observer",
    "source_code": "def attach_out_of_memory_observer(observer: Callable[[int, int, int, int], None]) -> None:\n    torch._C._mtia_attachOutOfMemoryObserver(observer)",
    "docstring": "Attach an out-of-memory observer to MTIA memory allocator",
    "type": "function",
    "file_path": "pytorch\\torch\\mtia\\__init__.py",
    "ast_data": "FunctionDef name:attach_out_of_memory_observer arg:observer arguments arg Call"
  },
  {
    "library": "matplotlib",
    "name": "thumbnail",
    "source_code": "def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear', preview=False):\n    im = imread(infile)\n    rows, cols, depth = im.shape\n    dpi = 100\n    height = rows / dpi * scale\n    width = cols / dpi * scale\n    if preview:\n        import matplotlib.pyplot as plt\n        fig = plt.figure(figsize=(width, height), dpi=dpi)\n    else:\n        from matplotlib.figure import Figure\n        fig = Figure(figsize=(width, height), dpi=dpi)\n        FigureCanvasBase(fig)\n    ax = fig.add_axes([0, 0, 1, 1], aspect='auto', frameon=False, xticks=[], yticks=[])\n    ax.imshow(im, aspect='auto', resample=True, interpolation=interpolation)\n    fig.savefig(thumbfile, dpi=dpi)\n    return fig",
    "docstring": "Make a thumbnail of image in *infile* with output filename *thumbfile*. See :doc:. Parameters ---------- infile : str or file-like The image file. Matplotlib relies on Pillow_ for image reading, and thus supports a wide range of file formats, including PNG, JPG, TIFF and others. .. _Pillow: thumbfile : str or file-like The thumbnail filename. scale : float, default: 0.1 The scale factor for the thumbnail. interpolation : str, default: 'bilinear' The interpolation scheme used in the resampling. See the *interpolation* parameter of for possible values. preview : bool, default: False If True, the default backend (presumably a user interface backend) will be used which will cause a figure to be raised if is called. If it is False, the figure is created using and the drawing backend is selected as would normally do. Returns ------- The figure instance containing the thumbnail.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:thumbnail arg:infile arg:thumbfile arg:scale arg:interpolation arg:preview arguments arg arg arg arg arg Assign Call Assign Assign Assign Assign If Assign Call Assign Call Call Assign Call Call Call Return return:yes"
  },
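A usage sketch (file names are hypothetical; any Pillow-readable image works):

```python
import matplotlib.image as mimage

# Writes a thumbnail scaled to 15% of the original pixel dimensions.
fig = mimage.thumbnail('photo.png', 'photo_thumb.png', scale=0.15)
print(fig.get_size_inches())  # (cols * scale / 100, rows * scale / 100)
```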
  {
    "library": "pytorch",
    "name": "_annotate_modules_for_dynamo",
    "source_code": "def _annotate_modules_for_dynamo(module: nn.Module, ignored_modules: set[nn.Module], use_orig_params: bool) -> None:\n    for submodule in module.modules():\n        if submodule not in ignored_modules:\n            \"[note: Dynamo treats FSDP wrapped modules as UnspecializedNNModule]\\n\\n            Dynamo doesn't get to see this instance (FullyShardedDataParallel) during tracing, since\\n            it skips tracing all the torch.distributed.fsdp code.\\n                - Why? Running the FSDP code eagerly avoids lots of issues trying to trace complex hooks, and also\\n                gets us graph-breaks on FSDP module boundaries which we want anyway for comm ops.\\n                - However, we _also_ want dynamo to treat the wrapped module inside FSDP 'unspecially' (*),\\n                and we need a way to indicate to dynamo which modules are wrapped by FSDP.\\n\\n            (*) UnspecializedNNModules in dynamo are traced-through without any assumptions, and with thorough\\n            guards.  NNModules otherwise are 'specialized', meaning there is less overhead due to assuming\\n            their code is well-behaved.\\n\\n            One particular issue with specialized NNModules for FSDP is that the\\n            views created for orig_params are captured into the compiled graph on the first iteration, and while\\n            they are always going to point to the correct flatparameter and give correct results, their order\\n            of creation influences the order of backward execution, preventing overlap of comm and computation\\n            during backward.  We need to _use_ the new parameter views created on each forward iteration, in\\n            order for backward to interleave hooks with compute per layer.  UnspecializedNNModule lets us achieve\\n            this by capturing the module code more 'functionally' and passing parameters in as inputs each time.\\n            \"\n            submodule._is_fsdp_managed_module = True\n            submodule._fsdp_use_orig_params = use_orig_params",
    "docstring": "Annotates the submodules in `` setting passed to the FSDP constructor.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_dynamo_utils.py",
    "ast_data": "FunctionDef name:_annotate_modules_for_dynamo arg:module arg:ignored_modules arg:use_orig_params arguments arg arg arg For Call If Compare Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "pop_label",
    "source_code": "def pop_label(self, index=-1):\n    self.labelCValues.pop(index)\n    t = self.labelTexts.pop(index)\n    t.remove()",
    "docstring": "Defaults to removing last label, but any index can be supplied",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:pop_label arg:self arg:index arguments arg arg Call Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_iter_collection_uses_per_path",
    "source_code": "def _iter_collection_uses_per_path(self, paths, all_transforms, offsets, facecolors, edgecolors):\n    Npaths = len(paths)\n    if Npaths == 0 or len(facecolors) == len(edgecolors) == 0:\n        return 0\n    Npath_ids = max(Npaths, len(all_transforms))\n    N = max(Npath_ids, len(offsets))\n    return (N + Npath_ids - 1) // Npath_ids",
    "docstring": "Compute how many times each raw path object returned by would be used when calling . This is intended for the backend to decide on the tradeoff between using the paths in-line and storing them once and reusing. Rounds up in case the number of uses is not the same for every path.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_iter_collection_uses_per_path arg:self arg:paths arg:all_transforms arg:offsets arg:facecolors arg:edgecolors arguments arg arg arg arg arg arg Assign Call If BoolOp Compare Compare Call Call Return return:yes Assign Call Call Assign Call Call Return return:yes"
  },
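A worked instance of the ceiling division at the end of the method: with 3 paths, 4 transforms, and 10 offsets, each raw path is used ceil(10 / 4) = 3 times.

```python
Npaths, Ntransforms, Noffsets = 3, 4, 10
Npath_ids = max(Npaths, Ntransforms)     # 4
N = max(Npath_ids, Noffsets)             # 10
print((N + Npath_ids - 1) // Npath_ids)  # 3, i.e. ceil(N / Npath_ids)
```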
  {
    "library": "matplotlib",
    "name": "set_ticklabels",
    "source_code": "def set_ticklabels(self, ticklabels, *, minor=False, **kwargs):\n    self.long_axis.set_ticklabels(ticklabels, minor=minor, **kwargs)",
    "docstring": "[*Discouraged*] Set tick labels. .. admonition:: Discouraged The use of this method is discouraged, because of the dependency on tick positions. In most cases, you'll want to use `.Colorbar.set_ticks~.ticker.FixedLocator.Text.Colorbar.set_ticks.Text` properties for the labels.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:set_ticklabels arg:self arg:ticklabels arguments arg arg arg arg Call"
  },
  {
    "library": "scikit-learn",
    "name": "partial_fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef partial_fit(self, X, y, classes=None, sample_weight=None):\n    if not hasattr(self, 'classes_'):\n        self._more_validate_params(for_partial_fit=True)\n        if self.class_weight == 'balanced':\n            raise ValueError(\"class_weight '{0}' is not supported for partial_fit. In order to use 'balanced' weights, use compute_class_weight('{0}', classes=classes, y=y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.\".format(self.class_weight))\n    return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss, learning_rate=self.learning_rate, max_iter=1, classes=classes, sample_weight=sample_weight, coef_init=None, intercept_init=None)",
    "docstring": "Perform one epoch of stochastic gradient descent on given samples. Internally, this method uses `np.unique(y_all)classes`. sample_weight : array-like, shape (n_samples,), default=None Weights applied to individual samples. If not provided, uniform weights are assumed. Returns ------- self : object Returns an instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_stochastic_gradient.py",
    "ast_data": "FunctionDef name:partial_fit arg:self arg:X arg:y arg:classes arg:sample_weight arguments arg arg arg arg arg If Call Call If Compare Raise Call Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "right_to_left_epipolar_distance",
    "source_code": "def right_to_left_epipolar_distance(pts1: Tensor, pts2: Tensor, Fm: Tensor) -> Tensor:\n    KORNIA_CHECK_IS_TENSOR(pts1)\n    KORNIA_CHECK_IS_TENSOR(pts2)\n    KORNIA_CHECK_IS_TENSOR(Fm)\n    if len(Fm.shape) < 3 or not Fm.shape[-2:] == (3, 3):\n        raise ValueError(f'Fm must be a (*, 3, 3) tensor. Got {Fm.shape}')\n    if pts2.shape[-1] == 2:\n        pts2 = convert_points_to_homogeneous(pts2)\n    line2_in_1: Tensor = pts2 @ Fm\n    return point_line_distance(pts1, line2_in_1)",
    "docstring": "Return one-sided epipolar distance for correspondences given the fundamental matrix. This method measures the distance from points in the left images to the epilines of the corresponding points in the right images as they reflect in the left images. Args: pts1: correspondences from the left images with shape :math:. If they are not homogeneous, converted automatically. pts2: correspondences from the right images with shape :math:. If they are not homogeneous, converted automatically. Fm: Fundamental matrices with shape :math:. Called Fm to avoid ambiguity with torch.nn.functional. Returns: the computed Symmetrical distance with shape :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\epipolar\\_metrics.py",
    "ast_data": "FunctionDef name:right_to_left_epipolar_distance arg:pts1 arg:pts2 arg:Fm arguments arg arg arg Call Call Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "create",
    "source_code": "@classmethod\ndef create(cls, obj, body, evaldict, defaults=None, doc=None, module=None, addsource=True, **attrs):\n    if isinstance(obj, str):\n        name, rest = obj.strip().split('(', 1)\n        signature = rest[:-1]\n        func = None\n    else:\n        name = None\n        signature = None\n        func = obj\n    self = cls(func, name, signature, defaults, doc, module)\n    ibody = '\\n'.join(('    ' + line for line in body.splitlines()))\n    return self.make('def %(name)s(%(signature)s):\\n' + ibody, evaldict, addsource, **attrs)",
    "docstring": "Create a function from the strings name, signature, and body. evaldict is the evaluation dictionary. If addsource is true, an attribute __source__ is added to the result. The attributes attrs are added, if any.",
    "type": "method",
    "file_path": "scipy\\scipy\\_lib\\decorator.py",
    "ast_data": "FunctionDef name:create arg:cls arg:obj arg:body arg:evaldict arg:defaults arg:doc arg:module arg:addsource arguments arg arg arg arg arg arg arg arg arg If Call Assign Call Call Assign Assign Assign Assign Assign Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "sparse_",
    "source_code": "def sparse_(tensor, sparsity, std=0.01, generator: _Optional[torch.Generator]=None):\n    if tensor.ndimension() != 2:\n        raise ValueError('Only tensors with 2 dimensions are supported')\n    rows, cols = tensor.shape\n    num_zeros = int(math.ceil(sparsity * rows))\n    with torch.no_grad():\n        tensor.normal_(0, std, generator=generator)\n        for col_idx in range(cols):\n            row_indices = torch.randperm(rows)\n            zero_indices = row_indices[:num_zeros]\n            tensor[zero_indices, col_idx] = 0\n    return tensor",
    "docstring": "Fill the 2D input as a sparse matrix. The non-zero elements will be drawn from the normal distribution :math:, as described in - Martens, J. (2010). Args: tensor: an n-dimensional sparsity: The fraction of elements in each column to be set to zero std: the standard deviation of the normal distribution used to generate the non-zero values generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.sparse_(w, sparsity=0.1)",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\init.py",
    "ast_data": "FunctionDef name:sparse_ arg:tensor arg:sparsity arg:std arg:generator arguments arg arg arg arg If Compare Call Raise Call Assign Assign Call Call With Call Call For Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_rot90_4D",
    "source_code": "def _rot90_4D(images, k, name_scope):\n\n    def _rot90():\n        return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3])\n\n    def _rot180():\n        return array_ops.reverse_v2(images, [1, 2])\n\n    def _rot270():\n        return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2])\n    cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)]\n    result = control_flow_case.case(cases, default=lambda: images, exclusive=True, name=name_scope)\n    shape = result.get_shape()\n    result.set_shape([shape[0], None, None, shape[3]])\n    return result",
    "docstring": "Rotate batch of images counter-clockwise by 90 degrees times. Args: images: 4-D Tensor of shape . k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D of the same type and shape as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_rot90_4D arg:images arg:k arg:name_scope arguments arg arg arg FunctionDef name:_rot90 arguments Return return:yes Call Call FunctionDef name:_rot180 arguments Return return:yes Call FunctionDef name:_rot270 arguments Return return:yes Call Call Assign Call Call Call Assign Call arguments Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "select_data_adapter",
    "source_code": "def select_data_adapter(x, y):\n    adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]\n    if not adapter_cls:\n        raise ValueError('Failed to find data adapter that can handle input: {}, {}'.format(_type_name(x), _type_name(y)))\n    elif len(adapter_cls) > 1:\n        raise RuntimeError('Data adapters should be mutually exclusive for handling inputs. Found multiple adapters {} to handle input: {}, {}'.format(adapter_cls, _type_name(x), _type_name(y)))\n    return adapter_cls[0]",
    "docstring": "Selects a data adapter than can handle a given x and y.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:select_data_adapter arg:x arg:y arguments arg arg Assign Call If Raise Call Call Call Call If Compare Call Raise Call Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "range_check",
    "source_code": "def range_check(i, n):\n    if i >= 0:\n        return T() if i < n else F()\n    else:\n        return T() if i >= n else F()",
    "docstring": "Checks if an index i is within range of a size n list Args: i: index n: list size Returns: Boolean",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:range_check arg:i arg:n arguments arg arg If Compare Return return:yes Compare Call Call Return return:yes Compare Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_process_tensorlike",
    "source_code": "def _process_tensorlike(inputs):\n\n    def _convert_numpy_and_scipy(x):\n        if isinstance(x, np.ndarray):\n            dtype = None\n            if issubclass(x.dtype.type, np.floating):\n                dtype = backend.floatx()\n            return tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)\n        elif _is_scipy_sparse(x):\n            return _scipy_sparse_to_sparse_tensor(x)\n        return x\n    inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)\n    return nest.list_to_tuple(inputs)",
    "docstring": "Process tensor-like inputs. This function: (1) Converts arrays to s. (2) Converts sparse matrices to s. (2) Converts s to s (for support). Args: inputs: Structure of s, arrays, or tensor-like. Returns: Structure of s or tensor-like.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py",
    "ast_data": "FunctionDef name:_process_tensorlike arg:inputs arguments arg FunctionDef name:_convert_numpy_and_scipy arg:x arguments arg If Call Assign If Call Assign Call Return return:yes Call If Call Return return:yes Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_constrained_layout",
    "source_code": "def get_constrained_layout(self):\n    return self._parent.get_constrained_layout()",
    "docstring": "Return whether constrained layout is being used. See :ref:.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\figure.py",
    "ast_data": "FunctionDef name:get_constrained_layout arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_superset_of",
    "source_code": "def is_superset_of(self, module_stack: _ModuleStackMeta) -> bool:\n    if self.is_empty_or_root():\n        return False\n    if module_stack.is_empty_or_root() is None:\n        return True\n    if len(self) <= len(module_stack):\n        return False\n    for i, parent_key in enumerate(module_stack):\n        if self[i] != parent_key:\n            return False\n    return True",
    "docstring": "Determines if self is a superset of the provided module stack. I.e., If self includes all elements from the provided module stack, plus additional elements on top. If self is empty or root, this method always return False. Example: Consider the following module stack: stack_1 = [GPT, block1, Attention_1, MLP] stack_2 = [GPT, block1] stack_1.is_superset_of(stack_2) == True stack_2.is_superset_of(stack_1) == False stack_3 = [GPT, block2, Attention_1] stack_1.is_superset_of(stack_3) == False stack_3.is_superset_of(stack_1) == False",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:is_superset_of arg:self arg:module_stack arguments arg arg If Call Return return:yes If Compare Call Return return:yes If Compare Call Call Return return:yes For Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_aten_op",
    "source_code": "def get_aten_op(fn: Callable, name: str):\n    module = fn.__module__\n    prefix = 'torch._refs'\n    assert module.startswith(prefix)\n    module = module[len(prefix):]\n    if module:\n        module = module[1:]\n        module = module.replace('.', '_')\n        module = module + '_'\n    return getattr(torch._ops.ops.aten, f'{module}{name}')",
    "docstring": "Given the __module__ of reference and its name, it returns (our best guess of) the ATen name of the associated operation Note: In ATen, the __name__ of a function within a module often starts by the module name. E.g. linalg_eigh, or special_zeta",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:get_aten_op arg:fn arg:name arguments arg arg Assign Assign Call Assign Call If Assign Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_calculate_range_ratio",
    "source_code": "def _calculate_range_ratio(self, info_dict: dict, info_str: str, module_fqn: str) -> torch.Tensor:\n    prefix_str = self.ACTIVATION_PREFIX if info_str == self.INPUT_STR else self.WEIGHT_PREFIX\n    per_channel_range = info_dict[prefix_str + self.PER_CHANNEL_MAX_KEY] - info_dict[prefix_str + self.PER_CHANNEL_MIN_KEY]\n    global_range = info_dict[prefix_str + self.GLOBAL_MAX_KEY] - info_dict[prefix_str + self.GLOBAL_MIN_KEY]\n    if global_range == 0:\n        range_zero_explanation = \"We recommend removing this channel as it doesn't provide any useful information.\"\n        raise ValueError(f'The range of the {info_str} data for module {module_fqn} is 0, which means you have a constant value channel. {range_zero_explanation}')\n    ratio = per_channel_range / global_range\n    return ratio",
    "docstring": "Takes in an info dict and calculates the s_c matrix. Args: info_dict (dict): A dictionary of either input or weight range info info_str (str): A str describing whether currently looking at weight or input info Either \"weight\" or \"input\" module_fqn (str): The fqn of the module we are looking at Returns a tensor of values, where each value is the s_c stat for a different channel",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_calculate_range_ratio arg:self arg:info_dict arg:info_str arg:module_fqn arguments arg arg arg arg Assign Compare Assign Assign If Compare Assign Raise Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "any_sparse",
    "source_code": "def any_sparse(classes):\n    return any((c is sparse_tensor.SparseTensor for c in nest.flatten(classes)))",
    "docstring": "Checks for sparse tensor. Args: classes: a structure of objects that identify the dataset item classes Returns: if contains a sparse tensor type and otherwise.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\util\\sparse.py",
    "ast_data": "FunctionDef name:any_sparse arg:classes arguments arg Return return:yes Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "__init__",
    "source_code": "def __init__(self) -> None:\n    self._registry: dict[registration.OpName, list[registration.ONNXFunction]] = defaultdict(list)\n    self._opset_version = _constants.TORCHLIB_OPSET\n    warnings.warn(f'torch.onnx.dynamo_export only implements opset version {self._opset_version} for now. If you need to use a different opset version, please register them with register_custom_op.')\n    self._initiate_registry_from_torchlib()",
    "docstring": "Initializes the registry",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_feature",
    "source_code": "def transform_feature(self, transformation_cache, state_manager):\n    input_tensor = transformation_cache.get(self.key, state_manager)\n    if self.normalizer_fn is not None:\n        input_tensor = self.normalizer_fn(input_tensor)\n    return input_tensor",
    "docstring": "See base class. In this case, we apply the to the input tensor. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Normalized input tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py",
    "ast_data": "FunctionDef name:transform_feature arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_lower_getattr_tensor_metadta_op",
    "source_code": "def _lower_getattr_tensor_metadta_op(model: GraphModule):\n    for n in model.graph.nodes:\n        if is_getattr_tensor_metadata_node(n):\n            maybe_dq = n.args[0]\n            if maybe_dq.op != 'call_method' or maybe_dq.target != 'dequantize':\n                continue\n            args = list(n.args)\n            args[0] = n.args[0].args[0]\n            n.args = tuple(args)",
    "docstring": "Modified the graph of the model inplace, to skip extra dequantize op before the general tensor shape ops when possible",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_lower_to_native_backend.py",
    "ast_data": "FunctionDef name:_lower_getattr_tensor_metadta_op arg:model arguments arg For If Call Assign If BoolOp Compare Compare Assign Call Assign Assign Call"
  },
  {
    "library": "pytorch",
    "name": "prepare_global_plan",
    "source_code": "@abc.abstractmethod\ndef prepare_global_plan(self, plans: list[SavePlan]) -> list[SavePlan]:\n    pass",
    "docstring": "Perform centralized planning of storage. This method is only called on the coordinator instance. While this method can produce a completely different plan, the preferred way is to store storage specific data in SavePlan::storage_data. Args: plans: A list of `` after storage global planning",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:prepare_global_plan arg:self arg:plans arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_DefaultReplicaThreadMode",
    "source_code": "class _DefaultReplicaThreadMode(_ThreadMode):\n\n    def __init__(self):\n        _ThreadMode.__init__(self, _get_default_strategy(), None, _get_default_replica_context())",
    "docstring": "Type of default value returned by . Used when the thread-local stack is empty.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "ClassDef name:_DefaultReplicaThreadMode FunctionDef name:__init__ arg:self arguments arg Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "Glue",
    "source_code": "class Glue(Node):\n\n    def __init__(self, glue_type: _GlueSpec | T.Literal['fil', 'fill', 'filll', 'neg_fil', 'neg_fill', 'neg_filll', 'empty', 'ss']):\n        super().__init__()\n        if isinstance(glue_type, str):\n            glue_spec = _GlueSpec._named[glue_type]\n        elif isinstance(glue_type, _GlueSpec):\n            glue_spec = glue_type\n        else:\n            raise ValueError('glue_type must be a glue spec name or instance')\n        self.glue_spec = glue_spec\n\n    def shrink(self) -> None:\n        super().shrink()\n        if self.size < NUM_SIZE_LEVELS:\n            g = self.glue_spec\n            self.glue_spec = g._replace(width=g.width * SHRINK_FACTOR)",
    "docstring": "Most of the information in this object is stored in the underlying `` class, which is shared between multiple glue objects. (This is a memory optimization which probably doesn't matter anymore, but it's easier to stick to what TeX does.)",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Glue FunctionDef name:__init__ arg:self arg:glue_type arguments arg arg Call Call If Call Assign If Call Assign Raise Call Assign FunctionDef name:shrink arg:self arguments arg Call Call If Compare Assign Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "_set_fillstyle",
    "source_code": "def _set_fillstyle(self, fillstyle):\n    fillstyle = mpl._val_or_rc(fillstyle, 'markers.fillstyle')\n    _api.check_in_list(self.fillstyles, fillstyle=fillstyle)\n    self._fillstyle = fillstyle",
    "docstring": "Set the fillstyle. Parameters ---------- fillstyle : {'full', 'left', 'right', 'bottom', 'top', 'none'} The part of the marker surface that is colored with markerfacecolor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:_set_fillstyle arg:self arg:fillstyle arguments arg arg Assign Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "prepare_local_plan",
    "source_code": "@abc.abstractmethod\ndef prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:\n    pass",
    "docstring": "Perform storage-specific local planning. While this method can produce a completely different plan, the recommended way is to store storage specific data in LoadPlan::storage_data. Args: plan (LoadPlan): The local plan from the `` after storage local planning",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py",
    "ast_data": "FunctionDef name:prepare_local_plan arg:self arg:plan arguments arg arg"
  },
  {
    "library": "cryptography",
    "name": "parameters",
    "source_code": "@abc.abstractmethod\ndef parameters(self) -> DHParameters:\n    pass",
    "docstring": "The DHParameters object associated with this public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dh.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_get_node_from_fqn",
    "source_code": "def _get_node_from_fqn(self, node_fqn: str) -> torch.fx.node.Node:\n    node_to_return = None\n    for node in self._model.graph.nodes:\n        if node.target == node_fqn:\n            node_to_return = node\n            break\n    if node_to_return is None:\n        raise ValueError('The node_fqn is was not found within the module.')\n    assert isinstance(node_to_return, torch.fx.node.Node)\n    return node_to_return",
    "docstring": "Takes in a node fqn and returns the node based on the fqn Args node_fqn (str): The fully qualified name of the node we want to find in model Returns the Node object of the given node_fqn otherwise returns None",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_get_node_from_fqn arg:self arg:node_fqn arguments arg arg Assign For If Compare Assign If Compare Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "forward_event_shape",
    "source_code": "def forward_event_shape(self, input_shape):\n    return self._forward_event_shape(tensor_shape.TensorShape(input_shape))",
    "docstring": "Shape of a single sample from a single batch as a . Same meaning as . May be only partially defined. Args: input_shape: indicating event-portion shape passed into function. Returns: forward_event_shape_tensor: indicating event-portion shape after applying . Possibly unknown.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:forward_event_shape arg:self arg:input_shape arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "tree_flatten_with_path",
    "source_code": "def tree_flatten_with_path(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]]=None) -> tuple[list[tuple[KeyPath, Any]], TreeSpec]:\n    raise NotImplementedError('KeyPaths are not yet supported in cxx_pytree.')",
    "docstring": "Flattens a pytree like :func:, but also returns each leaf's key path. Args: tree: a pytree to flatten. If it contains a custom type, that type must be registered with an appropriate when registered with :func:. is_leaf: An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `TrueTreeSpec` representing the structure of the flattened tree.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py",
    "ast_data": "FunctionDef name:tree_flatten_with_path arg:tree arg:is_leaf arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "get_default",
    "source_code": "def get_default():\n    return _default_ctx_stack.peek()",
    "docstring": "Returns the default execution context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\experimental\\context_stack.py",
    "ast_data": "FunctionDef name:get_default arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "should_load",
    "source_code": "def should_load(self, proto):\n    if proto.identifier != self.identifier:\n        return False\n    if self.version < proto.version.min_consumer:\n        return False\n    if proto.version.producer < self._min_producer_version:\n        return False\n    for bad_version in proto.version.bad_consumers:\n        if self.version == bad_version:\n            return False\n    return True",
    "docstring": "Checks if this object should load the SavedUserObject .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py",
    "ast_data": "FunctionDef name:should_load arg:self arg:proto arguments arg arg If Compare Return return:yes If Compare Return return:yes If Compare Return return:yes For If Compare Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "diag",
    "source_code": "@property\ndef diag(self):\n    in_diag = self.rows == self.cols\n    diag = np.zeros(min(self.n, self.n), dtype=np.float64)\n    diag[self.rows[in_diag]] = self.vals[in_diag]\n    return diag",
    "docstring": "Return the (dense) vector of the diagonal elements.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:diag arg:self arguments arg Assign Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_validate_limit",
    "source_code": "@staticmethod\ndef _validate_limit(limit, limit_type, n_features, is_empty_feature, keep_empty_feature):\n    n_features_in = _num_samples(is_empty_feature)\n    if limit is not None and (not np.isscalar(limit)) and (_num_samples(limit) != n_features_in):\n        raise ValueError(f\"'{limit_type}_value' should be of shape ({n_features_in},) when an array-like is provided. Got {len(limit)}, instead.\")\n    limit_bound = np.inf if limit_type == 'max' else -np.inf\n    limit = limit_bound if limit is None else limit\n    if np.isscalar(limit):\n        limit = np.full(n_features, limit)\n    limit = check_array(limit, ensure_all_finite=False, copy=False, ensure_2d=False)\n    if not keep_empty_feature and len(limit) == len(is_empty_feature):\n        limit = limit[~is_empty_feature]\n    return limit",
    "docstring": "Validate the limits (min/max) of the feature values. Converts scalar min/max limits to vectors of shape . Parameters ---------- limit: scalar or array-like The user-specified limit (i.e, min_value or max_value). limit_type: {'max', 'min'} Type of limit to validate. n_features: int Number of features in the dataset. is_empty_feature: ndarray, shape (n_features, ) Mask array indicating empty feature imputer has seen during fit. keep_empty_feature: bool If False, remove empty-feature indices from the limit. Returns ------- limit: ndarray, shape(n_features,) Array of limits, one for each feature.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\impute\\_iterative.py",
    "ast_data": "FunctionDef name:_validate_limit arg:limit arg:limit_type arg:n_features arg:is_empty_feature arg:keep_empty_feature arguments arg arg arg arg arg Assign Call If BoolOp Compare Call Compare Call Raise Call Call Assign Compare Assign Compare If Call Assign Call Assign Call If BoolOp Compare Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_cdf",
    "source_code": "def _cdf(self, x, mean, cov, maxpts, abseps, releps, lower_limit, rng):\n    lower = np.full(mean.shape, -np.inf) if lower_limit is None else lower_limit\n    b, a = np.broadcast_arrays(x, lower)\n    b, a = (b - mean, a - mean)\n    i_swap = b < a\n    signs = (-1) ** i_swap.sum(axis=-1)\n    a, b = (a.copy(), b.copy())\n    a[i_swap], b[i_swap] = (b[i_swap], a[i_swap])\n    n = x.shape[-1]\n    limits = np.concatenate((a, b), axis=-1)\n\n    def func1d(limits):\n        res = _qauto(_qmvn, cov, limits[:n], limits[n:], rng, error=abseps, limit=maxpts, n_batches=10)\n        return np.squeeze(res[0])\n    out = np.apply_along_axis(func1d, -1, limits) * signs\n    return _squeeze_output(out)",
    "docstring": "Multivariate normal cumulative distribution function. Parameters ---------- x : ndarray Points at which to evaluate the cumulative distribution function. mean : ndarray Mean of the distribution cov : array_like Covariance matrix of the distribution maxpts : integer The maximum number of points to use for integration abseps : float Absolute error tolerance releps : float Relative error tolerance lower_limit : array_like, optional Lower limit of integration of the cumulative distribution function. Default is negative infinity. Must be broadcastable with . rng : Generator an instance of ``, which is used internally for QMC integration. Notes ----- As this function does no argument checking, it should not be called directly; use 'cdf' instead. .. versionadded:: 1.0.0",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_cdf arg:self arg:x arg:mean arg:cov arg:maxpts arg:abseps arg:releps arg:lower_limit arg:rng arguments arg arg arg arg arg arg arg arg arg Assign Compare Call Assign Call Assign Assign Compare Assign Call Assign Call Call Assign Assign Assign Call FunctionDef name:func1d arg:limits arguments arg Assign Call Return return:yes Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "deref",
    "source_code": "def deref(self):\n    return self._wrapped",
    "docstring": "Returns the referenced object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\object_identity.py",
    "ast_data": "FunctionDef name:deref arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_binned",
    "source_code": "def predict_binned(self, X, missing_values_bin_idx, n_threads):\n    out = np.empty(X.shape[0], dtype=Y_DTYPE)\n    _predict_from_binned_data(self.nodes, X, self.binned_left_cat_bitsets, missing_values_bin_idx, n_threads, out)\n    return out",
    "docstring": "Predict raw values for binned data. Parameters ---------- X : ndarray, shape (n_samples, n_features) The input samples. missing_values_bin_idx : uint8 Index of the bin that is used for missing values. This is the index of the last bin and is always equal to max_bins (as passed to the GBDT classes), or equivalently to n_bins - 1. n_threads : int Number of OpenMP threads to use. Returns ------- y : ndarray, shape (n_samples,) The raw predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\predictor.py",
    "ast_data": "FunctionDef name:predict_binned arg:self arg:X arg:missing_values_bin_idx arg:n_threads arguments arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    X = self._validate_for_predict(X)\n    predict = self._sparse_predict if self._sparse else self._dense_predict\n    return predict(X)",
    "docstring": "Perform regression on samples in X. For an one-class model, +1 (inlier) or -1 (outlier) is returned. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) For kernel=\"precomputed\", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- y_pred : ndarray of shape (n_samples,) The predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\svm\\_base.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "wait_for_final",
    "source_code": "def wait_for_final(self, expected_version):\n    active_version, state = self.get_rdzv_state()\n    while True:\n        if state['status'] == 'final' and state['version'] == expected_version:\n            return active_version\n        elif state['status'] == 'frozen' and state['version'] == expected_version:\n            active_version, state = self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)\n        else:\n            raise EtcdRendezvousRetryableFailure('Rendezvous state transition no longer possible. Must re-enter.')",
    "docstring": "Helper method for the confirm phase.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py",
    "ast_data": "FunctionDef name:wait_for_final arg:self arg:expected_version arguments arg arg Assign Call While If BoolOp Compare Compare Return return:yes If BoolOp Compare Compare Assign Call Raise Call"
  },
  {
    "library": "scrapy",
    "name": "NoReferrerPolicy",
    "source_code": "class NoReferrerPolicy(ReferrerPolicy):\n    name: str = POLICY_NO_REFERRER\n\n    def referrer(self, response_url: str, request_url: str) -> str | None:\n        return None",
    "docstring": "The simplest policy is \"no-referrer\", which specifies that no referrer information is to be sent along with requests made from a particular request client to any origin. The header will be omitted entirely.",
    "type": "class",
    "file_path": "scrapy\\scrapy\\spidermiddlewares\\referer.py",
    "ast_data": "ClassDef name:NoReferrerPolicy FunctionDef name:referrer arg:self arg:response_url arg:request_url arguments arg arg arg Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "_UnicodeMinusFormat",
    "source_code": "class _UnicodeMinusFormat(string.Formatter):\n\n    def format_field(self, value, format_spec):\n        return Formatter.fix_minus(super().format_field(value, format_spec))",
    "docstring": "A specialized string formatter so that respects :rc:. This implementation relies on the fact that the format string is only ever called with kwargs *x* and *pos*, so it blindly replaces dashes by unicode minuses without further checking.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "ClassDef name:_UnicodeMinusFormat FunctionDef name:format_field arg:self arg:value arg:format_spec arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "TGreatestUpperBound",
    "source_code": "class TGreatestUpperBound(Constraint):\n\n    def __init__(self, res, rhs1, rhs2):\n        self.res = res\n        self.rhs1 = rhs1\n        self.rhs2 = rhs2\n\n    def __repr__(self):\n        return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'\n\n    def __eq__(self, other):\n        if isinstance(other, TGreatestUpperBound):\n            return self.res == other.res and self.rhs1 == other.rhs1 and (self.rhs2 == other.rhs2)\n        else:\n            return False",
    "docstring": "Greatest Upper bound for tensors with dynamic type",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py",
    "ast_data": "ClassDef name:TGreatestUpperBound FunctionDef name:__init__ arg:self arg:res arg:rhs1 arg:rhs2 arguments arg arg arg arg Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:__eq__ arg:self arg:other arguments arg arg If Call Return return:yes BoolOp Compare Compare Compare Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "allocate",
    "source_code": "def allocate(self, block: Allocation, is_last: bool) -> bool:\n    return False",
    "docstring": "Try to assign block to a memory location in this bool. Return True if an assignment was made.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\memory_planning.py",
    "ast_data": "FunctionDef name:allocate arg:self arg:block arg:is_last arguments arg arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "PatchEmbed",
    "source_code": "class PatchEmbed(Module):\n\n    def __init__(self, kernel_size: tuple[int, int]=(16, 16), stride: tuple[int, int]=(16, 16), padding: tuple[int, int]=(0, 0), in_chans: int=3, embed_dim: int=768) -> None:\n        super().__init__()\n        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)\n\n    def forward(self, x: Tensor) -> Tensor:\n        x = self.proj(x)\n        x = x.permute(0, 2, 3, 1)\n        return x",
    "docstring": "Image to Patch Embedding.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py",
    "ast_data": "ClassDef name:PatchEmbed FunctionDef name:__init__ arg:self arg:kernel_size arg:stride arg:padding arg:in_chans arg:embed_dim arguments arg arg arg arg arg arg Call Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_set_oob_score",
    "source_code": "@abstractmethod\ndef _set_oob_score(self, X, y):\n    pass",
    "docstring": "Calculate out of bag predictions and score.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:_set_oob_score arg:self arg:X arg:y arguments arg arg arg"
  },
  {
    "library": "pandas",
    "name": "_process_style",
    "source_code": "def _process_style(self, style: dict[str, Any] | None) -> str | None:\n    from odf.style import ParagraphProperties, Style, TableCellProperties, TextProperties\n    if style is None:\n        return None\n    style_key = json.dumps(style)\n    if style_key in self._style_dict:\n        return self._style_dict[style_key]\n    name = f'pd{len(self._style_dict) + 1}'\n    self._style_dict[style_key] = name\n    odf_style = Style(name=name, family='table-cell')\n    if 'font' in style:\n        font = style['font']\n        if font.get('bold', False):\n            odf_style.addElement(TextProperties(fontweight='bold'))\n    if 'borders' in style:\n        borders = style['borders']\n        for side, thickness in borders.items():\n            thickness_translation = {'thin': '0.75pt solid #000000'}\n            odf_style.addElement(TableCellProperties(attributes={f'border{side}': thickness_translation[thickness]}))\n    if 'alignment' in style:\n        alignment = style['alignment']\n        horizontal = alignment.get('horizontal')\n        if horizontal:\n            odf_style.addElement(ParagraphProperties(textalign=horizontal))\n        vertical = alignment.get('vertical')\n        if vertical:\n            odf_style.addElement(TableCellProperties(verticalalign=vertical))\n    self.book.styles.addElement(odf_style)\n    return name",
    "docstring": "Convert a style dictionary to a OpenDocument style sheet Parameters ---------- style : Dict Style dictionary Returns ------- style_key : str Unique style key for later reference in sheet",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odswriter.py",
    "ast_data": "FunctionDef name:_process_style arg:self arg:style arguments arg arg If Compare Return return:no Assign Call If Compare Return return:yes Assign Call Assign Assign Call If Compare Assign If Call Call Call If Compare Assign For Call Assign Call Call If Compare Assign Assign Call If Call Call Assign Call If Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_file_sha256",
    "source_code": "def compute_file_sha256(path: str) -> str:\n    if not os.path.exists(path):\n        return ''\n    hash = hashlib.sha256()\n    with open(path, 'rb') as f:\n        for b in f:\n            hash.update(b)\n    return hash.hexdigest()",
    "docstring": "Compute the SHA256 hash of a file and return it as a hex string.",
    "type": "function",
    "file_path": "pytorch\\tools\\linter\\adapters\\update_s3.py",
    "ast_data": "FunctionDef name:compute_file_sha256 arg:path arguments arg If Call Return return:yes Assign Call With Call For Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_has_reductions",
    "source_code": "def _has_reductions(self):\n    return len(self._reduce_map)",
    "docstring": "True if some reductions where performed by loop body.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:_has_reductions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_view",
    "source_code": "def _view(self) -> Self:\n    result = self._simple_new(self._values, name=self._name, refs=self._references)\n    result._cache = self._cache\n    return result",
    "docstring": "fastpath to make a shallow copy, i.e. new object with same data.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_view arg:self arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "start_cancel",
    "source_code": "def start_cancel(self):\n    pywrap_tfe.TFE_CancellationManagerStartCancel(self._impl)",
    "docstring": "Cancels blocking operations that have been registered with this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\cancellation.py",
    "ast_data": "FunctionDef name:start_cancel arg:self arguments arg Call"
  },
  {
    "library": "pygame",
    "name": "repaint_rect",
    "source_code": "def repaint_rect(self, screen_rect):\n    if self._clip:\n        self.lostsprites.append(screen_rect.clip(self._clip))\n    else:\n        self.lostsprites.append(Rect(screen_rect))",
    "docstring": "repaint the given area LayeredDirty.repaint_rect(screen_rect): return None screen_rect is in screen coordinates.",
    "type": "method",
    "file_path": "pygame\\src_py\\sprite.py",
    "ast_data": "FunctionDef name:repaint_rect arg:self arg:screen_rect arguments arg arg If Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_update_conv_input_qspec_map_after_replacement",
    "source_code": "def _update_conv_input_qspec_map_after_replacement(original_node: Node, replacement_node: Node):\n    assert _is_conv_or_conv_transpose_node(original_node)\n    assert _is_conv_or_conv_transpose_node(replacement_node)\n    if 'quantization_annotation' not in original_node.meta:\n        return\n    original_input_qspec_map = original_node.meta['quantization_annotation'].input_qspec_map\n    input_qspec_map = {}\n    all_configs = list(original_input_qspec_map.items())\n    input_qspec_map[replacement_node.args[0]] = all_configs[0][1]\n    input_qspec_map[replacement_node.args[1]] = all_configs[1][1]\n    if len(replacement_node.args) > 2 and len(all_configs) > 2:\n        input_qspec_map[replacement_node.args[2]] = all_configs[2][1]\n    replacement_node.meta['quantization_annotation'].input_qspec_map = input_qspec_map",
    "docstring": "Update the in the annotation after subgraph rewriting. The original annotation referred to the nodes in the original graph, so the keys in the will need to be updated to reflect the corresponding nodes in the replacement graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\qat_utils.py",
    "ast_data": "FunctionDef name:_update_conv_input_qspec_map_after_replacement arg:original_node arg:replacement_node arguments arg arg Call Call If Compare Return return:no Assign Assign Assign Call Call Assign Assign If BoolOp Compare Call Compare Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, base=1.0, offset=0.0):\n    self._edge = _Edge_integer(base, 0)\n    self._offset = offset",
    "docstring": "Parameters ---------- base : float > 0, default: 1.0 Interval between ticks. offset : float, default: 0.0 Value added to each multiple of *base*. .. versionadded:: 3.8",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:base arg:offset arguments arg arg arg Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "_get_current_allocator",
    "source_code": "def _get_current_allocator() -> _CUDAAllocator:\n    return _CUDAAllocator(torch._C._cuda_getAllocator())",
    "docstring": "Return the allocator being currently used. .. note:: See :ref: for details on creating and using a custom allocator",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:_get_current_allocator arguments Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "convert_pt2e",
    "source_code": "def convert_pt2e(model: GraphModule, use_reference_representation: bool=False, fold_quantize: bool=True) -> GraphModule:\n    torch._C._log_api_usage_once('quantization_api.quantize_pt2e.convert_pt2e')\n    if not isinstance(use_reference_representation, bool):\n        raise ValueError(f'Unexpected argument type for `use_reference_representation`, please make sure you intend to pass argument {use_reference_representation} to convert_pt2e')\n    original_graph_meta = model.meta\n    model = _convert_to_reference_decomposed_fx(model)\n    model = _fold_conv_bn_qat(model)\n    pm = PassManager([DuplicateDQPass()])\n    model = pm(model).graph_module\n    pm = PassManager([PortNodeMetaForQDQ()])\n    model = pm(model).graph_module\n    if fold_quantize:\n        constant_fold(model, _quant_node_constraint)\n    if use_reference_representation:\n        model = reference_representation_rewrite(model)\n    model.meta.update(original_graph_meta)\n    model = _disallow_eval_train(model)\n    return model",
    "docstring": "Convert a calibrated/trained model to a quantized model Args: * (torch.fx.GraphModule): calibrated/trained model * (bool): boolean flag to indicate whether to produce referece representation or not * (bool): boolean flag for whether fold the quantize op or not Returns: quantized model, either in q/dq representation or reference representation Example:: # prepared_model: the model produced by / and calibration/training # produces a quantized model that represents quantized computation with # quantize dequantize ops and fp32 ops by default. # Please refer to # # for detailed explanation of output quantized model quantized_model = convert_pt2e(prepared_model)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_pt2e.py",
    "ast_data": "FunctionDef name:convert_pt2e arg:model arg:use_reference_representation arg:fold_quantize arguments arg arg arg Call If Call Raise Call Assign Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Assign Call If Call If Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "Similarity",
    "source_code": "class Similarity(BaseModel):\n\n    def __init__(self, rotation: bool=True, scale: bool=True, shift: bool=True) -> None:\n        super().__init__()\n        if rotation:\n            self.rot = nn.Parameter(torch.zeros(1))\n        else:\n            self.register_buffer('rot', torch.zeros(1))\n        if shift:\n            self.shift = nn.Parameter(torch.zeros(1, 2, 1))\n        else:\n            self.register_buffer('shift', torch.zeros(1, 2, 1))\n        if scale:\n            self.scale = nn.Parameter(torch.ones(1))\n        else:\n            self.register_buffer('scale', torch.ones(1))\n        self.reset_model()\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}(angle = {self.rot},               \\n shift={self.shift}, \\n scale={self.scale})'\n\n    def reset_model(self) -> None:\n        torch.nn.init.zeros_(self.rot)\n        torch.nn.init.zeros_(self.shift)\n        torch.nn.init.ones_(self.scale)\n\n    def forward(self) -> Tensor:\n        rot = self.scale * angle_to_rotation_matrix(self.rot)\n        out = convert_affinematrix_to_homography(torch.cat([rot, self.shift], dim=2))\n        return out\n\n    def forward_inverse(self) -> Tensor:\n        return torch.inverse(self.forward())",
    "docstring": "Similarity geometric model to be used with ImageRegistrator module for the optimization-based image registration. Args: rotation: if True, the rotation is optimizable, else constant zero. scale: if True, the scale is optimizable, else constant zero. shift: if True, the shift is optimizable, else constant one.",
    "type": "class",
    "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py",
    "ast_data": "ClassDef name:Similarity FunctionDef name:__init__ arg:self arg:rotation arg:scale arg:shift arguments arg arg arg arg Call Call If Assign Call Call Call Call If Assign Call Call Call Call If Assign Call Call Call Call Call FunctionDef name:__repr__ arg:self arguments arg Return return:yes FunctionDef name:reset_model arg:self arguments arg Call Call Call FunctionDef name:forward arg:self arguments arg Assign Call Assign Call Call Return return:yes FunctionDef name:forward_inverse arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    self._check_vocabulary()\n    return np.asarray([t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))], dtype=object)",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Return return:yes Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_capheight",
    "source_code": "def get_capheight(self):\n    return self._header[b'CapHeight']",
    "docstring": "Return the cap height as float.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_capheight arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_validate_structured_pruning",
    "source_code": "def _validate_structured_pruning(t):\n    shape = t.shape\n    if len(shape) <= 1:\n        raise ValueError(f'Structured pruning can only be applied to multidimensional tensors. Found tensor of shape {shape} with {len(shape)} dims')",
    "docstring": "Validate that the tensor to be pruned is at least 2-Dimensional. Validation helper to check that the tensor to be pruned is multi- dimensional, such that the concept of \"channels\" is well-defined. Args: t (torch.Tensor): tensor representing the parameter to prune Raises: ValueError: if the tensor is not at least 2D.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\prune.py",
    "ast_data": "FunctionDef name:_validate_structured_pruning arg:t arguments arg Assign If Compare Call Raise Call Call"
  },
  {
    "library": "matplotlib",
    "name": "on_draw",
    "source_code": "def on_draw(self, event):\n    cv = self.canvas\n    if event is not None:\n        if event.canvas != cv:\n            raise RuntimeError\n    self._bg = cv.copy_from_bbox(cv.figure.bbox)\n    self._draw_animated()",
    "docstring": "Callback to register with 'draw_event'.",
    "type": "method",
    "file_path": "matplotlib\\galleries\\users_explain\\animations\\blitting.py",
    "ast_data": "FunctionDef name:on_draw arg:self arg:event arguments arg arg Assign If Compare If Compare Raise Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "kind",
    "source_code": "@property\ndef kind(self) -> SparseIndexKind:\n    if isinstance(self.sp_index, IntIndex):\n        return 'integer'\n    else:\n        return 'block'",
    "docstring": "The kind of sparse index for this array. One of {'integer', 'block'}.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\sparse\\array.py",
    "ast_data": "FunctionDef name:kind arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "encode_resource_handle",
    "source_code": "def encode_resource_handle(resource_handle):\n    return numpy_compat.np_asarray(bytearray(resource_handle.SerializeToString()), dtype=dtypes.np_resource)",
    "docstring": "Encode a ResourceHandle proto as custom numpy struct type.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:encode_resource_handle arg:resource_handle arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "_get_current_week",
    "source_code": "def _get_current_week(self, date):\n    return date - datetime.timedelta(self._get_weekday(date))",
    "docstring": "Return the start date of the current interval.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:_get_current_week arg:self arg:date arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_split",
    "source_code": "@array_function_dispatch(_split_dispatcher)\ndef _split(a, sep=None, maxsplit=None):\n    return _vec_string(a, np.object_, 'split', [sep] + _clean_args(maxsplit))",
    "docstring": "For each element in , return a list of the words in the string, using as the delimiter string. Calls :meth: element-wise. Parameters ---------- a : array-like, with `sepmaxsplitmaxsplit` splits are done. Returns ------- out : ndarray Array of list objects Examples -------- >>> import numpy as np >>> x = np.array(\"Numpy is nice!\") >>> np.strings.split(x, \" \") # doctest: +SKIP array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP >>> np.strings.split(x, \" \", 1) # doctest: +SKIP array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- str.split, rsplit",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\strings.py",
    "ast_data": "FunctionDef name:_split arg:a arg:sep arg:maxsplit arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "layer_call_wrapper",
    "source_code": "def layer_call_wrapper(call_collection, method, name):\n\n    def wrapper(*args, **kwargs):\n        layer = call_collection.layer\n        training = None\n        inputs = _filtered_inputs([args, kwargs])\n        if (args or kwargs) and call_collection.training_arg_was_passed(args, kwargs):\n            training = call_collection.get_training_arg_value(args, kwargs)\n        original_losses = _reset_layer_losses(layer)\n        with base_layer_utils.call_context().enter(layer, inputs=inputs, build_graph=False, training=training, saving=True):\n            with autocast_variable.enable_auto_cast_variables(layer._compute_dtype_object):\n                ret = method(*args, **kwargs)\n        _restore_layer_losses(original_losses)\n        return ret\n    fn = tf_decorator.make_decorator(target=method, decorator_func=wrapper)\n    fn.__name__ = name\n    return fn",
    "docstring": "Ensures layer losses are kept the same, and runs method in call context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py",
    "ast_data": "FunctionDef name:layer_call_wrapper arg:call_collection arg:method arg:name arguments arg arg arg FunctionDef name:wrapper arguments arg arg Assign Assign Assign Call If BoolOp BoolOp Call Assign Call Assign Call With Call Call With Call Assign Call Call Return return:yes Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_op_name",
    "source_code": "def get_op_name(tensor_name):\n    if not tensor_name:\n        raise ValueError(f'Tensor name cannot be empty or None. Received: {tensor_name}.')\n    if tensor_name.startswith('^'):\n        tensor_name = tensor_name[1:]\n    if ':' in tensor_name:\n        op_name, _ = tensor_name.split(':')\n        return op_name\n    return tensor_name",
    "docstring": "Extract the Op name from a Tensor name. The Op name is everything before a colon, if present, not including any ^ prefix denoting a control dependency. Args: tensor_name: the full name of a Tensor in the graph. Returns: The name of the Op of which the given Tensor is an output. Raises: ValueError: if tensor_name is None or empty.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:get_op_name arg:tensor_name arguments arg If Raise Call If Call Assign If Compare Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_check_tensor_all",
    "source_code": "def _check_tensor_all(cond, message=None):\n    _check_tensor_all_with(RuntimeError, cond, message)",
    "docstring": "Throws error containing an optional message if the specified condition is False. Error type: `torch.Tensor`",
    "type": "function",
    "file_path": "pytorch\\torch\\__init__.py",
    "ast_data": "FunctionDef name:_check_tensor_all arg:cond arg:message arguments arg arg Call"
  },
  {
    "library": "numpy",
    "name": "busday_count",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)\ndef busday_count(begindates, enddates, weekmask=None, holidays=None, busdaycal=None, out=None):\n    return (begindates, enddates, weekmask, holidays, out)",
    "docstring": "busday_count( begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None ) Counts the number of valid days between and , not including the day of . If `busdaycalendar` together, containing the number of valid days between the begin and end dates. See Also -------- busdaycalendar : An object that specifies a custom set of valid days. is_busday : Returns a boolean array indicating valid days. busday_offset : Applies an offset counted in valid days. Examples -------- >>> import numpy as np >>> # Number of weekdays in January 2011 ... np.busday_count('2011-01', '2011-02') 21 >>> # Number of weekdays in 2011 >>> np.busday_count('2011', '2012') 260 >>> # Number of Saturdays in 2011 ... np.busday_count('2011', '2012', weekmask='Sat') 53",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:busday_count arg:begindates arg:enddates arg:weekmask arg:holidays arg:busdaycal arg:out arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_read_uint32",
    "source_code": "def _read_uint32(f):\n    return np.uint32(struct.unpack('>I', f.read(4))[0])",
    "docstring": "Read an unsigned 32-bit integer",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_idl.py",
    "ast_data": "FunctionDef name:_read_uint32 arg:f arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_url",
    "source_code": "def is_url(url: object) -> bool:\n    if not isinstance(url, str):\n        return False\n    return parse_url(url).scheme in _VALID_URLS",
    "docstring": "Check to see if a URL has a valid protocol. Parameters ---------- url : str or unicode Returns ------- isurl : bool If has a valid protocol return True otherwise False.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:is_url arg:url arguments arg If Call Return return:yes Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "ymax",
    "source_code": "@property\ndef ymax(self):\n    return np.max(self.get_points()[:, 1])",
    "docstring": "The top edge of the bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:ymax arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_dated_items",
    "source_code": "def get_dated_items(self):\n    return self._get_dated_items(datetime.date.today())",
    "docstring": "Return (date_list, items, extra_context) for this request.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_dated_items arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, pred=None, pivot=None, branch=None, name='cond_text', context_def=None, import_scope=None):\n    self._name = ops.get_default_graph().unique_name(name)\n    if context_def:\n        self._init_from_proto(context_def, import_scope=import_scope)\n    else:\n        ControlFlowContext.__init__(self)\n        self._pred = pred\n        self._pivot = pivot\n        self._branch = branch\n        self._values.add(pred.name)\n        self._external_values[pred.name] = pred\n        self._values.add(pivot.name)\n        pivot.op._set_control_flow_context(self)",
    "docstring": "Creates a . Args: pred: The tensor for the conditional predicate. pivot: The predicate tensor in this branch. branch: 0 or 1 representing this branch. name: Name of the python object. context_def: Optional protocol buffer to initialize the object from. import_scope: Optional . Name scope to add. Only used when initialing from protocol buffer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:pred arg:pivot arg:branch arg:name arg:context_def arg:import_scope arguments arg arg arg arg arg arg arg Assign Call Call If Call Call Assign Assign Assign Call Assign Call Call"
  },
  {
    "library": "scipy",
    "name": "_periodic_knots",
    "source_code": "def _periodic_knots(x, k):\n    xc = np.copy(x)\n    n = len(xc)\n    if k % 2 == 0:\n        dx = np.diff(xc)\n        xc[1:-1] -= dx[:-1] / 2\n    dx = np.diff(xc)\n    t = np.zeros(n + 2 * k)\n    t[k:-k] = xc\n    for i in range(0, k):\n        t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1]\n        t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)]\n    return t",
    "docstring": "returns vector of nodes on circle",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_bsplines.py",
    "ast_data": "FunctionDef name:_periodic_knots arg:x arg:k arguments arg arg Assign Call Assign Call If Compare Assign Call Assign Call Assign Call Assign For Call Assign Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "_pdf",
    "source_code": "def _pdf(self, x):\n    return self._hpdf[np.searchsorted(self._hbins, x, side='right')]",
    "docstring": "PDF of the histogram",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_pdf arg:self arg:x arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "is_categorical_column_weighted",
    "source_code": "def is_categorical_column_weighted(self):\n    if isinstance(self.categorical_column, (fc._WeightedCategoricalColumn, fc_lib.WeightedCategoricalColumn)):\n        return True\n    return False",
    "docstring": "Check if the categorical column of the embedding column is weighted.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:is_categorical_column_weighted arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "UmfpackNotFoundError",
    "source_code": "class UmfpackNotFoundError(NotFoundError):\n    pass",
    "docstring": "UMFPACK sparse solver ( not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [umfpack]) or by setting the UMFPACK environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:UmfpackNotFoundError"
  },
  {
    "library": "scipy",
    "name": "trimmed_var",
    "source_code": "def trimmed_var(a, limits=(0.1, 0.1), inclusive=(1, 1), relative=True, axis=None, ddof=0):\n    if not isinstance(limits, tuple) and isinstance(limits, float):\n        limits = (limits, limits)\n    if relative:\n        out = trimr(a, limits=limits, inclusive=inclusive, axis=axis)\n    else:\n        out = trima(a, limits=limits, inclusive=inclusive)\n    return out.var(axis=axis, ddof=ddof)",
    "docstring": "Returns the trimmed variance of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:trimmed_var arg:a arg:limits arg:inclusive arg:relative arg:axis arg:ddof arguments arg arg arg arg arg arg If BoolOp Call Call Assign If Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "compare_references",
    "source_code": "def compare_references(self, old_refs: Sequence[nodes.Element], new_refs: Sequence[nodes.Element], warning_msg: str) -> None:\n    if not self.noqa and len(old_refs) != len(new_refs):\n        old_ref_rawsources = [ref.rawsource for ref in old_refs]\n        new_ref_rawsources = [ref.rawsource for ref in new_refs]\n        logger.warning(warning_msg.format(old_ref_rawsources, new_ref_rawsources), location=self.node, type='i18n', subtype='inconsistent_references')",
    "docstring": "Warn about mismatches between references in original and translated content.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\transforms\\i18n.py",
    "ast_data": "FunctionDef name:compare_references arg:self arg:old_refs arg:new_refs arg:warning_msg arguments arg arg arg arg If BoolOp Compare Call Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "container",
    "source_code": "@tf_contextlib.contextmanager\ndef container(self, container_name):\n    original_container = self._container\n    with ops.init_scope():\n        original_init_container = ops.get_default_graph()._container\n    try:\n        self._container = container_name\n        with ops.init_scope():\n            ops.get_default_graph()._container = container_name\n        yield self._container\n    finally:\n        self._container = original_container\n        with ops.init_scope():\n            ops.get_default_graph()._container = original_init_container",
    "docstring": "Returns a context manager that specifies the resource container to use. Overridden from to update both the init_scope container and the present inner container. This is necessary to make sure setting containers applies correctly both to created variables and to stateful ops. Args: container_name: container name string. Returns: A context manager for defining resource containers for stateful ops, yields the container name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:container arg:self arg:container_name arguments arg arg Assign With Call Assign Call Try Assign With Call Assign Call Assign With Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "should_checkpoint",
    "source_code": "@property\ndef should_checkpoint(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Whether checkpointing is needed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:should_checkpoint arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "is_python",
    "source_code": "def is_python(cell):\n    return cell['cell_type'] == 'code' and cell['source'] and (not cell['source'][0].startswith('%%'))",
    "docstring": "Checks if the cell consists of Python code.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ipynb.py",
    "ast_data": "FunctionDef name:is_python arg:cell arguments arg Return return:yes BoolOp Compare Call"
  },
  {
    "library": "virtualenv",
    "name": "close",
    "source_code": "def close(self):\n    pass",
    "docstring": "Do nothing.",
    "type": "method",
    "file_path": "virtualenv\\src\\virtualenv\\app_data\\na.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "ConvolutionDimensionNumbers",
    "source_code": "class ConvolutionDimensionNumbers:\n    __slots__ = ('input_batch_dimension', 'input_feature_dimension', 'input_spatial_dimensions', 'kernel_input_feature_dimension', 'kernel_output_feature_dimension', 'kernel_spatial_dimensions', 'output_batch_dimension', 'output_feature_dimension', 'output_spatial_dimensions')\n\n    def __init__(self):\n        self.input_batch_dimension = 0\n        self.input_feature_dimension = 0\n        self.input_spatial_dimensions = []\n        self.kernel_input_feature_dimension = 0\n        self.kernel_output_feature_dimension = 0\n        self.kernel_spatial_dimensions = []\n        self.output_batch_dimension = 0\n        self.output_feature_dimension = 0\n        self.output_spatial_dimensions = []",
    "docstring": "Python representation of a xla.ConvolutionDimensionNumbers protobuf.",
    "type": "class",
    "file_path": "tensorflow\\third_party\\xla\\xla\\python\\xla_client.py",
    "ast_data": "ClassDef name:ConvolutionDimensionNumbers Assign FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_get_alpha_vec",
    "source_code": "@staticmethod\ndef _get_alpha_vec(x, y, tris_pts):\n    ndim = tris_pts.ndim - 2\n    a = tris_pts[:, 1, :] - tris_pts[:, 0, :]\n    b = tris_pts[:, 2, :] - tris_pts[:, 0, :]\n    abT = np.stack([a, b], axis=-1)\n    ab = _transpose_vectorized(abT)\n    OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :]\n    metric = ab @ abT\n    metric_inv = _pseudo_inv22sym_vectorized(metric)\n    Covar = ab @ _transpose_vectorized(np.expand_dims(OM, ndim))\n    ksi = metric_inv @ Covar\n    alpha = _to_matrix_vectorized([[1 - ksi[:, 0, 0] - ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])\n    return alpha",
    "docstring": "Fast (vectorized) function to compute barycentric coordinates alpha. Parameters ---------- x, y : array-like of dim 1 (shape (nx,)) Coordinates of the points whose points barycentric coordinates are requested. tris_pts : array like of dim 3 (shape: (nx, 3, 2)) Coordinates of the containing triangles apexes. Returns ------- array of dim 2 (shape (nx, 3)) Barycentric coordinates of the points inside the containing triangles.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "FunctionDef name:_get_alpha_vec arg:x arg:y arg:tris_pts arguments arg arg arg Assign Assign Assign Assign Call Assign Call Assign Call Assign Assign Call Assign Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "floatx",
    "source_code": "def floatx():\n    return _FLOATX",
    "docstring": "Returns the default float type, as a string. E.g. , , . Returns: String, the current default float type. Example: >>> tf.keras.backend.floatx() 'float32'",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py",
    "ast_data": "FunctionDef name:floatx arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_executions",
    "source_code": "def num_executions(self):\n    return len(self._execution_digests)",
    "docstring": "Get the number of execution events read so far.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:num_executions arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "renamer",
    "source_code": "def renamer(x, suffix: str | None):\n    if x in to_rename and suffix is not None:\n        return f'{x}{suffix}'\n    return x",
    "docstring": "Rename the left and right indices. If there is overlap, and suffix is not None, add suffix, otherwise, leave it as-is. Parameters ---------- x : original column name suffix : str or None Returns ------- x : renamed column name",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\merge.py",
    "ast_data": "FunctionDef name:renamer arg:x arg:suffix arguments arg arg If BoolOp Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nsmallest",
    "source_code": "def nsmallest(self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep='first') -> DataFrame:\n    return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest()",
    "docstring": "Return the first rows ordered by in ascending order. Return the first rows with the smallest values in , in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to `ncolumnsncolumnsn` distinct smallest elements: >>> df.nsmallest(4, \"population\", keep=\"all\") population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Iceland 337000 17036 IS Nauru 337000 182 NR To order by the smallest values in column \"population\" and then \"GDP\", we can specify multiple columns like in the next example. >>> df.nsmallest(3, [\"population\", \"GDP\"]) population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Nauru 337000 182 NR",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:nsmallest arg:self arg:n arg:columns arg:keep arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "fetches",
    "source_code": "def fetches(self):\n    return self._final_fetches",
    "docstring": "Return the unique names of tensors to fetch. Returns: A list of strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:fetches arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "colorbar",
    "source_code": "@property\ndef colorbar(self):\n    return self._colorizer.colorbar",
    "docstring": "The last colorbar associated with this object. May be None",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py",
    "ast_data": "FunctionDef name:colorbar arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "prune",
    "source_code": "def prune(self) -> None:\n    self.traced = symbolic_trace(self.model)\n    modules = dict(self.traced.named_modules())\n    for node in self.traced.graph.nodes:\n        for pattern, convert_fn in self.patterns.items():\n            matched = apply_match(modules, pattern, node, [])\n            if matched is None:\n                continue\n            first_module = modules.get(node.target)\n            if first_module is not None and parametrize.is_parametrized(first_module) and module_contains_param(first_module, FakeStructuredSparsity):\n                convert_block = []\n                for node in matched:\n                    if node.op == 'call_module':\n                        convert_block.append(modules.get(node.target))\n                    elif node.op == 'call_function':\n                        convert_block.append(node.target)\n                convert_fn(*convert_block)\n    for module in self.traced.modules():\n        if module_contains_param(module, FakeStructuredSparsity):\n            raise Exception(f'Error: {module} still contains FakeStructuredSparsity parametrizations!')\n    self.traced.graph.lint()\n    self.traced.recompile()\n    return self.traced",
    "docstring": "This function will FX symbolically trace the model and then find instances of the patterns defined in self.patterns (by default SUPPORTED_STRUCTURED_PRUNING_PATTERNS ). For each pattern, it will apply to corresponding conversion function, which will modify the output and input size expected by the modules within the pattern",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\base_structured_sparsifier.py",
    "ast_data": "FunctionDef name:prune arg:self arguments arg Assign Call Assign Call Call For For Call Assign Call If Compare Assign Call If BoolOp Compare Call Call Assign For If Compare Call Call If Compare Call Call For Call If Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "stop_rasterizing",
    "source_code": "def stop_rasterizing(self):\n    self._renderer = self._vector_renderer\n    height = self._height * self.dpi\n    img = np.asarray(self._raster_renderer.buffer_rgba())\n    slice_y, slice_x = cbook._get_nonzero_slices(img[..., 3])\n    cropped_img = img[slice_y, slice_x]\n    if cropped_img.size:\n        gc = self._renderer.new_gc()\n        self._renderer.draw_image(gc, slice_x.start * self._figdpi / self.dpi, (height - slice_y.stop) * self._figdpi / self.dpi, cropped_img[::-1])\n    self._raster_renderer = None\n    self.figure.dpi = self._figdpi\n    if self._bbox_inches_restore:\n        r = process_figure_for_rasterizing(self.figure, self._bbox_inches_restore, self._vector_renderer, self._figdpi)\n        self._bbox_inches_restore = r",
    "docstring": "Exit \"raster\" mode. All of the drawing that was done since the last call will be copied to the vector backend by calling draw_image.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_mixed.py",
    "ast_data": "FunctionDef name:stop_rasterizing arg:self arguments arg Assign Assign Assign Call Call Assign Call Assign If Assign Call Call Assign Assign If Assign Call Assign"
  },
  {
    "library": "django",
    "name": "_check_form",
    "source_code": "def _check_form(self, obj):\n    if not _issubclass(obj.form, BaseModelForm):\n        return must_inherit_from(parent='BaseModelForm', option='form', obj=obj, id='admin.E016')\n    else:\n        return []",
    "docstring": "Check that form subclasses BaseModelForm.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_form arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "tensor_summary",
    "source_code": "@tf_export(v1=['summary.tensor_summary'])\ndef tensor_summary(name, tensor, summary_description=None, collections=None, summary_metadata=None, family=None, display_name=None):\n    if summary_metadata is None:\n        summary_metadata = _SummaryMetadata()\n    if summary_description is not None:\n        summary_metadata.summary_description = summary_description\n    if display_name is not None:\n        summary_metadata.display_name = display_name\n    serialized_summary_metadata = summary_metadata.SerializeToString()\n    if _distribute_summary_op_util.skip_summary():\n        return _constant_op.constant('')\n    with _summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):\n        val = _gen_logging_ops.tensor_summary_v2(tensor=tensor, tag=tag, name=scope, serialized_summary_metadata=serialized_summary_metadata)\n        _summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES])\n    return val",
    "docstring": "Outputs a protocol buffer with a serialized tensor.proto. Args: name: A name for the generated node. If display_name is not set, it will also serve as the tag name in TensorBoard. (In that case, the tag name will inherit tf name scopes.) tensor: A tensor of any type and shape to serialize. summary_description: A long description of the summary sequence. Markdown is supported. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . summary_metadata: Optional SummaryMetadata proto (which describes which plugins may use the summary value). family: Optional; if provided, used as the prefix of the summary tag, which controls the name used for display on TensorBoard when display_name is not set. display_name: A string used to name this data in TensorBoard. If this is not set, then the node name will be used instead. Returns: A scalar of type . The serialized protocol buffer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py",
    "ast_data": "FunctionDef name:tensor_summary arg:name arg:tensor arg:summary_description arg:collections arg:summary_metadata arg:family arg:display_name arguments arg arg arg arg arg arg arg If Compare Assign Call If Compare Assign If Compare Assign Assign Call If Call Return return:yes Call With Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "execute",
    "source_code": "def execute(gm: GraphModule, *args: Unpack[Ts], executor: str='aten', executor_parameters: Optional[dict]=None) -> Any:\n    if executor == 'aten':\n        return gm.forward(*args)\n    msg = f\"Received unexpected value for 'executor': {executor}. Allowed values are: aten.\"\n    raise ValueError(msg)",
    "docstring": "Prototype ATen executor. Just executes the context's graph.",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims\\executor.py",
    "ast_data": "FunctionDef name:execute arg:gm arguments arg arg arg arg If Compare Return return:yes Call Assign Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "score",
    "source_code": "def score(self, X, y=None):\n    xp, _ = get_namespace(X)\n    return float(xp.mean(self.score_samples(X)))",
    "docstring": "Return the average log-likelihood of all samples. See. \"Pattern Recognition and Machine Learning\" by C. Bishop, 12.2.1 p. 574 or Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : Ignored Ignored. Returns ------- ll : float Average log-likelihood of the samples under the current model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_pca.py",
    "ast_data": "FunctionDef name:score arg:self arg:X arg:y arguments arg arg arg Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "isValueType",
    "source_code": "def isValueType(typ: CType, properties: LazyIrProperties | None=None) -> bool:\n    if isinstance(typ, BaseCType):\n        treat_scalars_as_constants = properties and properties.TreatScalarsAsConstants\n        return typ.type == getValueT() or (typ.type == scalarT and (not treat_scalars_as_constants)) or typ.type == SymIntT\n    elif typ == VectorCType(BaseCType(SymIntT)):\n        return False\n    elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):\n        return isValueType(typ.elem, properties)\n    return False",
    "docstring": "Given a type, determine if it is a Value-like type. This is equivalent to being Tensor-like, but assumes the type has already been transformed.",
    "type": "function",
    "file_path": "pytorch\\torchgen\\api\\lazy.py",
    "ast_data": "FunctionDef name:isValueType arg:typ arg:properties arguments arg arg If Call Assign BoolOp Return return:yes BoolOp Compare Call BoolOp Compare Compare If Compare Call Call Return return:yes If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "SoftmaxTransform",
    "source_code": "class SoftmaxTransform(Transform):\n    domain = constraints.real_vector\n    codomain = constraints.simplex\n\n    def __eq__(self, other):\n        return isinstance(other, SoftmaxTransform)\n\n    def _call(self, x):\n        logprobs = x\n        probs = (logprobs - logprobs.max(-1, True)[0]).exp()\n        return probs / probs.sum(-1, True)\n\n    def _inverse(self, y):\n        probs = y\n        return probs.log()\n\n    def forward_shape(self, shape):\n        if len(shape) < 1:\n            raise ValueError('Too few dimensions on input')\n        return shape\n\n    def inverse_shape(self, shape):\n        if len(shape) < 1:\n            raise ValueError('Too few dimensions on input')\n        return shape",
    "docstring": "Transform from unconstrained space to the simplex via :math: then normalizing. This is not bijective and cannot be used for HMC. However this acts mostly coordinate-wise (except for the final normalization), and thus is appropriate for coordinate-wise optimization algorithms.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributions\\transforms.py",
    "ast_data": "ClassDef name:SoftmaxTransform Assign Assign FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Call FunctionDef name:_call arg:self arg:x arguments arg arg Assign Assign Call Call Return return:yes Call FunctionDef name:_inverse arg:self arg:y arguments arg arg Assign Return return:yes Call FunctionDef name:forward_shape arg:self arg:shape arguments arg arg If Compare Call Raise Call Return return:yes FunctionDef name:inverse_shape arg:self arg:shape arguments arg arg If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "parse",
    "source_code": "def parse(self, s, dpi=72, prop=None, *, antialiased=None):\n    prop = prop.copy() if prop is not None else None\n    antialiased = mpl._val_or_rc(antialiased, 'text.antialiased')\n    from matplotlib.backends import backend_agg\n    load_glyph_flags = {'vector': LoadFlags.NO_HINTING, 'raster': backend_agg.get_hinting_flag()}[self._output_type]\n    return self._parse_cached(s, dpi, prop, antialiased, load_glyph_flags)",
    "docstring": "Parse the given math expression *s* at the given *dpi*. If *prop* is provided, it is a object specifying the \"default\" font to use in the math expression, used for all non-math text. The results are cached, so multiple calls to with the same expression should be fast. Depending on the *output* type, this returns either a or a .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\mathtext.py",
    "ast_data": "FunctionDef name:parse arg:self arg:s arg:dpi arg:prop arguments arg arg arg arg arg Assign Compare Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "synchronize",
    "source_code": "def synchronize(self) -> None:\n    super().synchronize()",
    "docstring": "Wait for the event to complete. Waits until the completion of all work currently captured in this event. This prevents the CPU thread from proceeding until the event completes. .. note:: This is a wrapper around `CUDA Event documentation`_ for more info.",
    "type": "method",
    "file_path": "pytorch\\torch\\cuda\\streams.py",
    "ast_data": "FunctionDef name:synchronize arg:self arguments arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_function",
    "source_code": "def _is_function(self, name) -> bool:\n    return compat.as_str(name) in self._functions",
    "docstring": "Tests whether 'name' is registered in this graph's function library. Args: name: string op name. Returns: bool indicating whether or not 'name' is registered in function library.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_is_function arg:self arg:name arguments arg arg Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "_broadcast_bucket",
    "source_code": "def _broadcast_bucket(bucket_index: int, zero: ZeroRedundancyOptimizer):\n    overlap_info = zero._overlap_info\n    assert len(overlap_info.assigned_ranks_per_bucket) > bucket_index, '`assigned_ranks_per_bucket` is not fully constructed'\n    assigned_ranks = sorted(overlap_info.assigned_ranks_per_bucket[bucket_index])\n    assert len(assigned_ranks) > 0, f'Bucket {bucket_index} should be assigned to at least one rank'\n    for assigned_rank in assigned_ranks:\n        bucket_assignments = zero._bucket_assignments_per_rank[assigned_rank]\n        if bucket_index in bucket_assignments:\n            send_tensor = bucket_assignments[bucket_index].tensor\n            assert send_tensor is not None\n            overlap_info.broadcast_handles.append(dist.broadcast(send_tensor, src=dist.get_global_rank(zero.process_group, assigned_rank), group=zero.process_group, async_op=True))",
    "docstring": "Broadcasts a bucket's parameters. Arguments: bucket_index (int): the index of the bucket corresponding to the parameters to broadcast. zero (ZeroRedundancyOptimizer): the calling process's :class: instance.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py",
    "ast_data": "FunctionDef name:_broadcast_bucket arg:bucket_index arg:zero arguments arg arg Assign Compare Call Assign Call Compare Call For Assign If Compare Assign Compare Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "@abstractmethod\ndef run(self, state_handler: Callable[[_RendezvousContext, float], _Action], deadline: float, update_deadline: Optional[Callable[[timedelta], float]]=None) -> None:\n    pass",
    "docstring": "Execute a rendezvous operation. An operation is run inside a state machine and is expected to transition the rendezvous from one state to another. Args: state_handler: A callable that is expected to return the next state transition action based on the current state of the rendezvous. deadline: The time, in seconds, at which the operation will be considered timed-out. update_deadline: Function to generate a new operation deadline if the current node may participate in the next rendezvous.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "FunctionDef name:run arg:self arg:state_handler arg:deadline arg:update_deadline arguments arg arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_group_device_list",
    "source_code": "def _group_device_list(devices):\n    assert not _is_device_list_single_worker(devices)\n    device_dict = {}\n    for d in devices:\n        d_spec = tf_device.DeviceSpec.from_string(d)\n        if d_spec.job not in device_dict:\n            device_dict[d_spec.job] = []\n        while len(device_dict[d_spec.job]) <= d_spec.task:\n            device_dict[d_spec.job].append([])\n        device_dict[d_spec.job][d_spec.task].append(d)\n    return device_dict",
    "docstring": "Groups the devices list by task_type and task_id. Args: devices: a list of device strings for remote devices. Returns: a dict of list of device strings mapping from task_type to a list of devices for the task_type in the ascending order of task_id.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\mirrored_strategy.py",
    "ast_data": "FunctionDef name:_group_device_list arg:devices arguments arg Call Assign For Assign Call If Compare Assign While Compare Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_more_precise",
    "source_code": "@compatibility(is_backward_compatible=False)\ndef is_more_precise(t1, t2):\n    if t1 == t2:\n        return True\n    if isinstance(t2, _DynType):\n        return True\n    if isinstance(t1, TensorType) and isinstance(t2, TensorType):\n        return len(t1.__args__) == len(t2.__args__) and all((is_more_precise(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__)))\n    else:\n        return False",
    "docstring": "A binary relation denoted by = TensorType((1,2,3)) int >= Dyn int >= int TensorType((1,Dyn,3)) <= TensorType((1,2,3))",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\tensor_type.py",
    "ast_data": "FunctionDef name:is_more_precise arg:t1 arg:t2 arguments arg arg If Compare Return return:yes If Call Return return:yes If BoolOp Call Call Return return:yes BoolOp Compare Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_fill_non_empty_info",
    "source_code": "@abstractmethod\ndef _fill_non_empty_info(self) -> None:\n    pass",
    "docstring": "Add lines to the info table, pertaining to non-empty dataframe.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:_fill_non_empty_info arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_raise_error_for_incorrect_control_flow_context",
    "source_code": "def _raise_error_for_incorrect_control_flow_context(self):\n    graph = ops.get_default_graph()\n    in_tpu_ctx = False\n    while graph is not None:\n        ctx = graph._get_control_flow_context()\n        while ctx is not None:\n            if isinstance(ctx, tpu_replication.TPUReplicateContext):\n                in_tpu_ctx = True\n                break\n            ctx = ctx.outer_context\n        if in_tpu_ctx:\n            break\n        graph = getattr(graph, 'outer_graph', None)\n    if graph != ops.get_default_graph() and in_tpu_ctx:\n        raise RuntimeError('Current graph {} does not match graph which contains TPUReplicateContext {}. This is most likely due to the fact that enqueueing embedding data is called inside control flow or a tf.function inside `strategy.run`. This is not supported because outside compilation fails to extract the enqueue ops as the head of a computation.'.format(ops.get_default_graph(), graph))\n    return in_tpu_ctx",
    "docstring": "Raises an error if we are not in the TPUReplicateContext.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:_raise_error_for_incorrect_control_flow_context arg:self arguments arg Assign Call Assign While Compare Assign Call While Compare If Call Assign Assign If Assign Call If BoolOp Compare Call Raise Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "make_eager_backend_with_torch_function_modes",
    "source_code": "def make_eager_backend_with_torch_function_modes(modes):\n    from contextlib import ExitStack\n\n    def fn(gm, fake_tensor_inputs, **kwargs):\n        stack = ExitStack()\n        for mode in modes:\n            stack.enter_context(mode)\n        result = gm.forward\n        stack.close()\n        return result\n    return fn",
    "docstring": "Used to trace HOPs (cond and while) for eager exectution, the metadata TF mode mutates vars outside of the scope of the HOP, and we can't have graph breaks in the HOP, so we need to externally run this mode and not trace it.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\backends\\debugging.py",
    "ast_data": "FunctionDef name:make_eager_backend_with_torch_function_modes arg:modes arguments arg FunctionDef name:fn arg:gm arg:fake_tensor_inputs arguments arg arg arg Assign Call For Call Assign Call Return return:yes Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "_settings_are_valid",
    "source_code": "def _settings_are_valid(self) -> bool:\n    for uri_template, values in self.feeds.items():\n        if values['batch_item_count'] and (not re.search('%\\\\(batch_time\\\\)s|%\\\\(batch_id\\\\)', uri_template)):\n            logger.error('%%(batch_time)s or %%(batch_id)d must be in the feed URI (%s) if FEED_EXPORT_BATCH_ITEM_COUNT setting or FEEDS.batch_item_count is specified and greater than 0. For more info see: https://docs.scrapy.org/en/latest/topics/feed-exports.html#feed-export-batch-item-count', uri_template)\n            return False\n    return True",
    "docstring": "If FEED_EXPORT_BATCH_ITEM_COUNT setting or FEEDS.batch_item_count is specified uri has to contain %(batch_time)s or %(batch_id)d to distinguish different files of partial output",
    "type": "method",
    "file_path": "scrapy\\scrapy\\extensions\\feedexport.py",
    "ast_data": "FunctionDef name:_settings_are_valid arg:self arguments arg For Call If BoolOp Call Call Return return:yes Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_xy1",
    "source_code": "def get_xy1(self):\n    return self._xy1",
    "docstring": "Return the *xy1* value of the line.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_xy1 arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "optimize",
    "source_code": "def optimize(self) -> None:\n    self.model = onnxscript_apis.optimize(self.model)",
    "docstring": "Optimize the ONNX model. This method optimizes the ONNX model by performing constant folding and eliminating redundancies in the graph. The optimization is done in-place.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:optimize arg:self arguments arg Assign Call"
  },
  {
    "library": "scipy",
    "name": "pg_series",
    "source_code": "def pg_series(k, z, n):\n    return sympy.diff(dg_series(z, n + k), z, k)",
    "docstring": "Symbolic expansion of polygamma(k, z) in z=0 to order n.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "FunctionDef name:pg_series arg:k arg:z arg:n arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_backward_hooks",
    "source_code": "def _get_backward_hooks(self):\n    full_backward_hooks: list[Callable] = []\n    if _global_is_full_backward_hook is True:\n        full_backward_hooks += _global_backward_hooks.values()\n    if self._is_full_backward_hook is True:\n        full_backward_hooks += self._backward_hooks.values()\n    non_full_backward_hooks: list[Callable] = []\n    if _global_is_full_backward_hook is False:\n        non_full_backward_hooks += _global_backward_hooks.values()\n    if self._is_full_backward_hook is False:\n        non_full_backward_hooks += self._backward_hooks.values()\n    return (full_backward_hooks, non_full_backward_hooks)",
    "docstring": "Return the backward hooks for use in the call function. It returns two lists, one with the full backward hooks and one with the non-full backward hooks.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:_get_backward_hooks arg:self arguments arg If Compare Call If Compare Call If Compare Call If Compare Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_format_graph_code",
    "source_code": "def _format_graph_code(name, filename, graph_str):\n    return f'TRACED GRAPH\\n {name} {filename} {graph_str}\\n'",
    "docstring": "Returns a string that formats the graph code.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_utils.py",
    "ast_data": "FunctionDef name:_format_graph_code arg:name arg:filename arg:graph_str arguments arg arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_generate_info_dict",
    "source_code": "def _generate_info_dict(self, model: GraphModule) -> dict[str, dict]:\n    info_dict: dict[str, dict] = {}\n    for fqn, module in model.named_modules():\n        if self._supports_report_gen(module):\n            pre_obs: ModelReportObserver = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)\n            num_batches: torch.Tensor = pre_obs.percentile_batches_tracked\n            average_ratios: torch.Tensor = pre_obs.average_percentile_ratio\n            channel_batch_cnts: torch.Tensor = pre_obs.constant_channels\n            total_batches: int = pre_obs.num_batches_tracked\n            max_vals: torch.Tensor = pre_obs.max_val\n            for index, ratio_val in enumerate(average_ratios):\n                if ratio_val.item() < 0:\n                    average_ratios[index] = -ratio_val\n                if ratio_val.item() < 1:\n                    average_ratios[index] = 1 / ratio_val\n            outlier_calcs = self._calculate_outlier_info(average_ratios, num_batches, total_batches)\n            info_dict[fqn] = {self.CHANNEL_AXIS_KEY: self.ch_axis, self.REF_PERCENTILE_KEY: self.reference_percentile, self.RATIO_THRES_KEY: self.ratio_threshold, self.COMP_METRIC_KEY: average_ratios, self.NUM_BATCHES_KEY: num_batches, self.OUTLIER_KEY: outlier_calcs[self.OUTLIER_KEY], self.IS_SUFFICIENT_BATCHES_KEY: outlier_calcs[self.IS_SUFFICIENT_BATCHES_KEY], self.CONSTANT_COUNTS_KEY: channel_batch_cnts, self.MAX_VALS_KEY: max_vals}\n    return info_dict",
    "docstring": "Helper function for generate_detector_report that does the generation of the dictionary. This process is done as specified in generate_detector_report documentation Args: model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers Returns a dict mapping relevant module fqns to: whether there were outliers found in activation before the number of batches used for each channel whether fraction of applicable batches used is above fraction_batches_used_threshold their p_r metric compared to the threshold the threshold used to make the recommendation the reference_percentile used to make the recommendation the channel axis used to determine individual channels the constant batch counts per channel the per channel max values",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_generate_info_dict arg:self arg:model arguments arg arg For Call If Call Call For Call If Compare Call Assign If Compare Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self) -> None:\n    if self.alive():\n        self.process.wait()\n    self.close()",
    "docstring": "Wait for the child process to exit.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\autotune_process.py",
    "ast_data": "FunctionDef name:wait arg:self arguments arg If Call Call Call"
  },
  {
    "library": "scipy",
    "name": "check_grad",
    "source_code": "@_transition_to_rng('seed', position_num=6)\ndef check_grad(func, grad, x0, *args, epsilon=_epsilon, direction='all', rng=None):\n    step = epsilon\n    x0 = np.asarray(x0)\n\n    def g(w, func, x0, v, *args):\n        return func(x0 + w * v, *args)\n    if direction == 'random':\n        _grad = np.asanyarray(grad(x0, *args))\n        if _grad.ndim > 1:\n            raise ValueError(\"'random' can only be used with scalar valued func\")\n        rng_gen = check_random_state(rng)\n        v = rng_gen.standard_normal(size=x0.shape)\n        _args = (func, x0, v) + args\n        _func = g\n        vars = np.zeros((1,))\n        analytical_grad = np.dot(_grad, v)\n    elif direction == 'all':\n        _args = args\n        _func = func\n        vars = x0\n        analytical_grad = grad(x0, *args)\n    else:\n        raise ValueError(f'{direction} is not a valid string for ``direction`` argument')\n    return np.sqrt(np.sum(np.abs((analytical_grad - approx_fprime(vars, _func, step, *_args)) ** 2)))",
    "docstring": "Check the correctness of a gradient function by comparing it against a (forward) finite-difference approximation of the gradient. Parameters ---------- func : callable `funcgradfuncfuncgradgradfuncgradfuncnumpy.random.Generatorrngnumpy.random.Generatornumpy.random.Generatornumpy.random.default_rngrngdirection'random'gradx0`. See Also -------- approx_fprime Examples -------- >>> import numpy as np >>> def func(x): ... return x[0]**2 - 0.5 * x[1]**3 >>> def grad(x): ... return [2 * x[0], -1.5 * x[1]**2] >>> from scipy.optimize import check_grad >>> check_grad(func, grad, [1.5, -1.5]) 2.9802322387695312e-08 # may vary >>> rng = np.random.default_rng() >>> check_grad(func, grad, [1.5, -1.5], ... direction='random', seed=rng) 2.9802322387695312e-08",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:check_grad arg:func arg:grad arg:x0 arguments arg arg arg arg arg arg arg Assign Assign Call FunctionDef name:g arg:w arg:func arg:x0 arg:v arguments arg arg arg arg arg Return return:yes Call If Compare Assign Call Call If Compare Raise Call Assign Call Assign Call Assign Assign Assign Call Assign Call If Compare Assign Assign Assign Assign Call Raise Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "container",
    "source_code": "@tf_export(v1=['container'])\ndef container(container_name) -> ContextManager[str]:\n    return get_default_graph().container(container_name)",
    "docstring": "Wrapper for using the default graph. Args: container_name: The container string to use in the context. Returns: A context manager that specifies the default container to use for newly created stateful ops.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:container arg:container_name arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_color",
    "source_code": "def set_color(self, c):\n    self._shared_setter('edgecolor', c)\n    self._shared_setter('facecolor', c)",
    "docstring": "Set the edgecolor of the rectangle and the connectors, and the facecolor for the rectangle. Parameters ---------- c : :mpltype:",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:set_color arg:self arg:c arguments arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "AxesY",
    "source_code": "class AxesY(_Base):\n\n    def __init__(self, axes, aspect=1.0, ref_ax=None):\n        self._axes = axes\n        self._aspect = aspect\n        if aspect == 'axes' and ref_ax is None:\n            raise ValueError(\"ref_ax must be set when aspect='axes'\")\n        self._ref_ax = ref_ax\n\n    def get_size(self, renderer):\n        l1, l2 = self._axes.get_ylim()\n        if self._aspect == 'axes':\n            ref_aspect = _get_axes_aspect(self._ref_ax)\n            aspect = _get_axes_aspect(self._axes)\n        else:\n            aspect = self._aspect\n        rel_size = abs(l2 - l1) * aspect\n        abs_size = 0.0\n        return (rel_size, abs_size)",
    "docstring": "Scaled size whose relative part corresponds to the data height of the *axes* multiplied by the *aspect*.",
    "type": "class",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py",
    "ast_data": "ClassDef name:AxesY FunctionDef name:__init__ arg:self arg:axes arg:aspect arg:ref_ax arguments arg arg arg arg Assign Assign If BoolOp Compare Compare Raise Call Assign FunctionDef name:get_size arg:self arg:renderer arguments arg arg Assign Call If Compare Assign Call Assign Call Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "get_key",
    "source_code": "def get_key(self, uri: URI) -> ConnectionKeyT:\n    return (b'http-proxy', self._proxy_uri.host, self._proxy_uri.port)",
    "docstring": "We use the proxy uri instead of uri obtained from request url",
    "type": "method",
    "file_path": "scrapy\\scrapy\\core\\http2\\agent.py",
    "ast_data": "FunctionDef name:get_key arg:self arg:uri arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_join",
    "source_code": "@dispatch.dispatch_for_api(string_ops.reduce_join_v2)\ndef reduce_join(inputs: ragged_tensor.Ragged, axis=None, keepdims=None, separator='', name=None):\n    return ragged_math_ops.ragged_reduce_aggregate(string_ops.reduce_join, string_ops.unsorted_segment_join, inputs, axis, keepdims, separator, name or 'RaggedSegmentJoin')",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_string_ops.py",
    "ast_data": "FunctionDef name:reduce_join arg:inputs arg:axis arg:keepdims arg:separator arg:name arguments arg arg arg arg arg Return return:yes Call BoolOp Call"
  },
  {
    "library": "pandas",
    "name": "_maybe_pin_freq",
    "source_code": "@final\ndef _maybe_pin_freq(self, freq, validate_kwds: dict) -> None:\n    if freq is None:\n        self._freq = None\n    elif freq == 'infer':\n        if self._freq is None:\n            self._freq = to_offset(self.inferred_freq)\n    elif freq is lib.no_default:\n        pass\n    elif self._freq is None:\n        freq = to_offset(freq)\n        type(self)._validate_frequency(self, freq, **validate_kwds)\n        self._freq = freq\n    else:\n        freq = to_offset(freq)\n        _validate_inferred_freq(freq, self._freq)",
    "docstring": "Constructor helper to pin the appropriate attribute. Assumes that self._freq is currently set to any freq inferred in _from_sequence_not_strict.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_maybe_pin_freq arg:self arg:freq arg:validate_kwds arguments arg arg arg If Compare Assign If Compare If Compare Assign Call If Compare If Compare Assign Call Call Call Assign Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "NDArrayBackedExtensionIndex",
    "source_code": "class NDArrayBackedExtensionIndex(ExtensionIndex):\n    _data: NDArrayBackedExtensionArray\n\n    def _get_engine_target(self) -> np.ndarray:\n        return self._data._ndarray\n\n    def _from_join_target(self, result: np.ndarray) -> ArrayLike:\n        assert result.dtype == self._data._ndarray.dtype\n        return self._data._from_backing_data(result)",
    "docstring": "Index subclass for indexes backed by NDArrayBackedExtensionArray.",
    "type": "class",
    "file_path": "pandas\\pandas\\core\\indexes\\extension.py",
    "ast_data": "ClassDef name:NDArrayBackedExtensionIndex FunctionDef name:_get_engine_target arg:self arguments arg Return return:yes FunctionDef name:_from_join_target arg:self arg:result arguments arg arg Compare Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_freq_domain_conv",
    "source_code": "def _freq_domain_conv(xp, in1, in2, axes, shape, calc_fast_len=False):\n    if not len(axes):\n        return in1 * in2\n    complex_result = xp.isdtype(in1.dtype, 'complex floating') or xp.isdtype(in2.dtype, 'complex floating')\n    if calc_fast_len:\n        fshape = [sp_fft.next_fast_len(shape[a], not complex_result) for a in axes]\n    else:\n        fshape = shape\n    if not complex_result:\n        fft, ifft = (sp_fft.rfftn, sp_fft.irfftn)\n    else:\n        fft, ifft = (sp_fft.fftn, sp_fft.ifftn)\n    if xp.isdtype(in1.dtype, 'integral'):\n        in1 = xp.astype(in1, xp.float64)\n    if xp.isdtype(in2.dtype, 'integral'):\n        in2 = xp.astype(in2, xp.float64)\n    sp1 = fft(in1, fshape, axes=axes)\n    sp2 = fft(in2, fshape, axes=axes)\n    ret = ifft(sp1 * sp2, fshape, axes=axes)\n    if calc_fast_len:\n        fslice = tuple([slice(sz) for sz in shape])\n        ret = ret[fslice]\n    return ret",
    "docstring": "Convolve two arrays in the frequency domain. This function implements only base the FFT-related operations. Specifically, it converts the signals to the frequency domain, multiplies them, then converts them back to the time domain. Calculations of axes, shapes, convolution mode, etc. are implemented in higher level-functions, such as and . Those functions should be used instead of this one. Parameters ---------- in1 : array_like First input. in2 : array_like Second input. Should have the same number of dimensions as . axes : array_like of ints Axes over which to compute the FFTs. shape : array_like of ints The sizes of the FFTs. calc_fast_len : bool, optional If , set each value of to the next fast FFT length. Default is , use as-is. Returns ------- out : array An N-dimensional array containing the discrete linear convolution of with .",
    "type": "function",
    "file_path": "scipy\\scipy\\signal\\_signaltools.py",
    "ast_data": "FunctionDef name:_freq_domain_conv arg:xp arg:in1 arg:in2 arg:axes arg:shape arg:calc_fast_len arguments arg arg arg arg arg arg If Call Return return:yes Assign BoolOp Call Call If Assign Call Assign If Assign Assign If Call Assign Call If Call Assign Call Assign Call Assign Call Assign Call If Assign Call Call Assign Return return:yes"
  },
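The pad-transform-multiply-invert-trim idea above can be sketched with scipy.fft directly. This is a minimal illustration of the technique, not scipy's internal code path; the helper name `naive_fft_convolve` is made up for the example.

```python
import numpy as np
import scipy.fft as sp_fft

def naive_fft_convolve(a, b):
    n = len(a) + len(b) - 1                     # full linear-convolution length
    nfast = sp_fft.next_fast_len(n, real=True)  # pad to a fast FFT size
    spec = sp_fft.rfft(a, nfast) * sp_fft.rfft(b, nfast)
    return sp_fft.irfft(spec, nfast)[:n]        # trim padding back to length n

a = np.array([1.0, 2.0, 3.0])
b = np.array([0.0, 1.0, 0.5])
print(np.allclose(naive_fft_convolve(a, b), np.convolve(a, b)))  # True
```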
  {
    "library": "matplotlib",
    "name": "_extend_path",
    "source_code": "def _extend_path(self, path, mutation_size=10):\n    (x0, y0), (x1, y1) = path.vertices[-2:]\n    theta = math.atan2(y1 - y0, x1 - x0)\n    x2 = x1 + math.cos(theta) * mutation_size\n    y2 = y1 + math.sin(theta) * mutation_size\n    if path.codes is None:\n        return Path(np.concatenate([path.vertices, [[x2, y2]]]))\n    else:\n        return Path(np.concatenate([path.vertices, [[x2, y2]]]), np.concatenate([path.codes, [Path.LINETO]]))",
    "docstring": "Extend the path to make a room for drawing arrow.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axisline_style.py",
    "ast_data": "FunctionDef name:_extend_path arg:self arg:path arg:mutation_size arguments arg arg arg Assign Assign Call Assign Call Assign Call If Compare Return return:yes Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "dump_backend_repro_as_file",
    "source_code": "def dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False):\n    curdir = os.getcwd()\n    subdir = os.path.join(os.getcwd(), 'checkpoints')\n    if not os.path.exists(subdir):\n        os.makedirs(subdir, exist_ok=True)\n    file_name = os.path.join(subdir, f'minified_{len(gm.graph.nodes)}_nodes.py')\n    log.warning('Writing checkpoint with %s nodes to %s', len(gm.graph.nodes), file_name)\n    with open(file_name, 'w') as fd:\n        fd.write(generate_dynamo_fx_repro_string(gm, args, compiler_name, check_accuracy, save_dir=subdir))\n    latest_repro = os.path.join(curdir, 'repro.py')\n    log.warning('Copying %s to %s for convenience', file_name, latest_repro)\n    if use_buck:\n        BuckTargetWriter(latest_repro).write()\n    shutil.copyfile(file_name, latest_repro)",
    "docstring": "Saves the repro to a repro.py file",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\repro\\after_dynamo.py",
    "ast_data": "FunctionDef name:dump_backend_repro_as_file arg:gm arg:args arg:compiler_name arg:check_accuracy arguments arg arg arg arg Assign Call Assign Call Call If Call Call Assign Call Call Call Call With Call Call Call Assign Call Call If Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_merge_inner_shape",
    "source_code": "def _merge_inner_shape(inner_shape: tensor_lib.Tensor, static_inner_shape: tensor_shape.TensorShape, outer_axis: int, inner_axis: int) -> Tuple[tensor_lib.Tensor, tensor_shape.TensorShape]:\n    prefix = inner_shape[:outer_axis]\n    suffix = inner_shape[inner_axis + 1:]\n    internal = inner_shape[outer_axis:inner_axis + 1]\n    internal_value = [_reduce_prod_patch(internal)]\n    new_internal = array_ops.concat([prefix, internal_value, suffix], axis=0)\n    prefix_static = static_inner_shape[:outer_axis]\n    suffix_static = static_inner_shape[inner_axis + 1:]\n    internal_static = static_inner_shape[outer_axis:inner_axis + 1]\n    internal_value_static = tensor_shape.TensorShape([internal_static.num_elements()])\n    new_internal_static = prefix_static + internal_value_static + suffix_static\n    return (new_internal, new_internal_static)",
    "docstring": "Merge the inner shape of a DynamicRaggedShape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_merge_inner_shape arg:inner_shape arg:static_inner_shape arg:outer_axis arg:inner_axis arguments arg arg arg arg Assign Assign Assign Assign Call Assign Call Assign Assign Assign Assign Call Call Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_jpeg_quality_to_scale",
    "source_code": "def _jpeg_quality_to_scale(compression_strength: Tensor) -> Tensor:\n    scale: Tensor = differentiable_polynomial_floor(torch.where(compression_strength < 50, 5000.0 / compression_strength, 200.0 - 2.0 * compression_strength))\n    return scale",
    "docstring": "Convert a given JPEG quality to the scaling factor. Args: compression_strength (Tensor): Compression strength ranging from 0 to 100. Any shape is supported. Returns: scale (Tensor): Scaling factor to be applied to quantization matrix. Same shape as input.",
    "type": "function",
    "file_path": "kornia\\kornia\\enhance\\jpeg.py",
    "ast_data": "FunctionDef name:_jpeg_quality_to_scale arg:compression_strength arguments arg Call Call Compare Return return:yes"
  },
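The mapping itself is the standard IJG quality curve. Below is a minimal non-differentiable sketch using a plain `torch.floor`; the kornia version substitutes a differentiable polynomial floor so gradients can flow through the compression strength.

```python
import torch

def jpeg_quality_to_scale(q: torch.Tensor) -> torch.Tensor:
    # Standard IJG mapping: q < 50 uses 5000 / q, otherwise 200 - 2 * q.
    return torch.floor(torch.where(q < 50, 5000.0 / q, 200.0 - 2.0 * q))

print(jpeg_quality_to_scale(torch.tensor([10.0, 50.0, 95.0])))
# tensor([500., 100.,  10.])
```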
  {
    "library": "scikit-learn",
    "name": "strip_accents_ascii",
    "source_code": "def strip_accents_ascii(s):\n    nkfd_form = unicodedata.normalize('NFKD', s)\n    return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')",
    "docstring": "Transform accentuated unicode symbols into ascii or nothing. Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. Parameters ---------- s : str The string to strip. Returns ------- s : str The stripped string. See Also -------- strip_accents_unicode : Remove accentuated char for any unicode symbol.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:strip_accents_ascii arg:s arguments arg Assign Call Return return:yes Call Call"
  },
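A short usage sketch of the NFKD-then-drop approach. Note the caveat in the docstring: characters without a direct ASCII transliteration are silently removed.

```python
from sklearn.feature_extraction.text import strip_accents_ascii

print(strip_accents_ascii("àéîõü"))      # 'aeiou'
print(strip_accents_ascii("café noël"))  # 'cafe noel'
```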
  {
    "library": "tensorflow",
    "name": "_push_tape",
    "source_code": "def _push_tape(self):\n    if self._recording:\n        raise ValueError('Tape is still recording, This can happen if you try to re-enter an already-active tape.')\n    if self._tape is None:\n        self._tape = tape.push_new_tape(persistent=self._persistent, watch_accessed_variables=self._watch_accessed_variables)\n    else:\n        tape.push_tape(self._tape)\n    self._recording = True",
    "docstring": "Pushes a new tape onto the tape stack.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\backprop.py",
    "ast_data": "FunctionDef name:_push_tape arg:self arguments arg If Raise Call If Compare Assign Call Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "put",
    "source_code": "def put(self, closure, tag=None):\n    closure.tag = tag\n    if tag is not None:\n        with self._queue_lock:\n            self._tagged_queue[tag].put(closure, block=False)\n            self._closures_queued_condition.notify_all()\n    else:\n        with self._put_wait_lock, self._queue_lock:\n            self._queue_free_slot_condition.wait_for(lambda: not self._queue.full())\n            self._queue.put(closure, block=False)\n            metric_utils.monitor_int('queued_closures', self._queue.qsize())\n            self._raise_if_error()\n            self._closures_queued_condition.notify()",
    "docstring": "Put a closure into the queue for later execution. If was called before , the error from the first invocation of will be raised. Args: closure: The to put into the queue. tag: if not None, put into a queue with the given tag.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:put arg:self arg:closure arg:tag arguments arg arg arg Assign If Compare With Call Call With Call arguments Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "mul",
    "source_code": "def mul(self, other, level: Level | None=None, fill_value: float | None=None, axis: Axis=0) -> Series:\n    return self._flex_method(other, operator.mul, level=level, fill_value=fill_value, axis=axis)",
    "docstring": "Return Multiplication of series and other, element-wise (binary operator ). Equivalent to `Python documentation `_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=[\"a\", \"b\", \"c\", \"d\"]) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=[\"a\", \"b\", \"d\", \"e\"]) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.multiply(b, fill_value=0) a 1.0 b 0.0 c 0.0 d 0.0 e NaN dtype: float64 >>> a.mul(5, fill_value=0) a 5.0 b 5.0 c 5.0 d 0.0 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:mul arg:self arg:other arg:level arg:fill_value arg:axis arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_to_bchw",
    "source_code": "def _to_bchw(tensor: Tensor) -> Tensor:\n    if not isinstance(tensor, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(tensor)}')\n    if len(tensor.shape) < 2:\n        raise ValueError(f'Input size must be a two, three or four dimensional tensor. Got {tensor.shape}')\n    if len(tensor.shape) == 2:\n        tensor = tensor.unsqueeze(0)\n    if len(tensor.shape) == 3:\n        tensor = tensor.unsqueeze(0)\n    if len(tensor.shape) > 4:\n        tensor = tensor.view(-1, tensor.shape[-3], tensor.shape[-2], tensor.shape[-1])\n    return tensor",
    "docstring": "Convert a PyTorch tensor image to BCHW format. Args: tensor (torch.Tensor): image of the form :math:. Returns: input tensor of the form :math:.",
    "type": "function",
    "file_path": "kornia\\kornia\\utils\\image.py",
    "ast_data": "FunctionDef name:_to_bchw arg:tensor arguments arg If Call Raise Call Call If Compare Call Raise Call If Compare Call Assign Call If Compare Call Assign Call If Compare Call Assign Call Return return:yes"
  },
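The shape promotion is mechanical and easy to check by hand; a minimal sketch of the three cases (HW, CHW, and >4-D collapsed into the batch dimension):

```python
import torch

hw = torch.rand(5, 6)
chw = torch.rand(3, 5, 6)
nbchw = torch.rand(2, 4, 3, 5, 6)

print(hw.unsqueeze(0).unsqueeze(0).shape)  # torch.Size([1, 1, 5, 6])
print(chw.unsqueeze(0).shape)              # torch.Size([1, 3, 5, 6])
print(nbchw.view(-1, 3, 5, 6).shape)       # torch.Size([8, 3, 5, 6])
```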
  {
    "library": "tensorflow",
    "name": "_SetDefaultAttrValues",
    "source_code": "def _SetDefaultAttrValues(node_def, op_def):\n    assert node_def.op == op_def.name\n    for attr_def in op_def.attr:\n        key = attr_def.name\n        if attr_def.HasField('default_value'):\n            value = node_def.attr[key]\n            if value is None or value.WhichOneof('value') is None:\n                node_def.attr[key].CopyFrom(attr_def.default_value)",
    "docstring": "Set any default attr values in that aren't present.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\importer.py",
    "ast_data": "FunctionDef name:_SetDefaultAttrValues arg:node_def arg:op_def arguments arg arg Compare For Assign If Call Assign If BoolOp Compare Compare Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, x, pos=None):\n    return self.fmt % x",
    "docstring": "Return the formatted label string. Only the value *x* is formatted. The position is ignored.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:x arg:pos arguments arg arg arg Return return:yes"
  },
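This `__call__` appears to belong to a %-style formatter such as `FormatStrFormatter`; assuming so, usage looks like this, with the position argument ignored as documented:

```python
from matplotlib.ticker import FormatStrFormatter

fmt = FormatStrFormatter("%.2f")
print(fmt(3.14159))      # '3.14'
print(fmt(3.14159, 7))   # position argument is ignored: '3.14'
```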
  {
    "library": "pytorch",
    "name": "create_script_dict",
    "source_code": "def create_script_dict(obj):\n    return torch._C.ScriptDict(obj)",
    "docstring": "Create a `` and can be passed between Python and TorchScript with reference semantics and zero copy overhead.",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\_script.py",
    "ast_data": "FunctionDef name:create_script_dict arg:obj arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_fused_kernels_supported_devices",
    "source_code": "def _get_fused_kernels_supported_devices() -> list[str]:\n    return ['mps', 'cuda', 'xpu', 'hpu', 'cpu', torch._C._get_privateuse1_backend_name()]",
    "docstring": "Return the device type list that supports fused kernels in optimizer.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_foreach_utils.py",
    "ast_data": "FunctionDef name:_get_fused_kernels_supported_devices arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "max_pool1d",
    "source_code": "@tf_export('nn.max_pool1d')\n@dispatch.add_dispatch_support\ndef max_pool1d(input, ksize, strides, padding, data_format='NWC', name=None):\n    with ops.name_scope(name, 'MaxPool1d', [input]) as name:\n        if isinstance(padding, (list, tuple)) and data_format == 'NCHW_VECT_C':\n            raise ValueError(f\"`data_format='NCHW_VECT_C'` is not supported with explicit padding. Received: padding={padding}\")\n        if data_format is None:\n            data_format = 'NWC'\n        channel_index = 1 if data_format.startswith('NC') else 2\n        ksize = [1] + _get_sequence(ksize, 1, channel_index, 'ksize')\n        strides = [1] + _get_sequence(strides, 1, channel_index, 'strides')\n        padding, explicit_paddings = convert_padding(padding, 3)\n        if padding == 'EXPLICIT':\n            explicit_paddings = [0, 0] + explicit_paddings\n        expanding_dim = 1 if data_format == 'NWC' else 2\n        data_format = 'NHWC' if data_format == 'NWC' else 'NCHW'\n        input = array_ops.expand_dims_v2(input, expanding_dim)\n        result = gen_nn_ops.max_pool(input, ksize=ksize, strides=strides, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, name=name)\n        return array_ops.squeeze(result, expanding_dim)",
    "docstring": "Performs the max pooling on the input. Note internally this op reshapes and uses the underlying 2d operation. Args: input: A 3-D of the format specified by . ksize: An int or list of that has length or . The size of the window for each dimension of the input tensor. strides: An int or list of that has length or . The stride of the sliding window for each dimension of the input tensor. padding: Either the or indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension. See [here]( for more information. When explicit padding is used and data_format is , this should be in the form . When explicit padding used and data_format is , this should be in the form . When using explicit padding, the size of the paddings cannot be greater than the sliding window size. data_format: An optional string from: \"NWC\", \"NCW\". Defaults to \"NWC\". name: A name for the operation (optional). Returns: A of format specified by . The max pooled output tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:max_pool1d arg:input arg:ksize arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg With Call If BoolOp Call Compare Raise Call If Compare Assign Assign Call Assign Call Assign Call Assign Call If Compare Assign Assign Compare Assign Compare Assign Call Assign Call Return return:yes Call Call"
  },
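A small end-to-end check of the documented behaviour: a 3-D NWC input is expanded to 4-D, pooled with the 2-D kernel, and squeezed back.

```python
import tensorflow as tf

x = tf.constant([[[1.0], [3.0], [2.0], [5.0]]])  # shape (1, 4, 1), NWC
y = tf.nn.max_pool1d(x, ksize=2, strides=2, padding="VALID")
print(y.numpy().ravel())                         # [3. 5.]
```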
  {
    "library": "pytorch",
    "name": "patch_forward",
    "source_code": "@contextmanager\ndef patch_forward(obj: torch.nn.Module, new_method):\n    original_method = obj.forward\n    obj.forward = new_method.__get__(obj, obj.__class__)\n    try:\n        yield\n    finally:\n        obj.forward = original_method",
    "docstring": "Helper method to make it easier to cleanly torch.export() a method on a module that is not .",
    "type": "function",
    "file_path": "pytorch\\torch\\export\\_trace.py",
    "ast_data": "FunctionDef name:patch_forward arg:obj arg:new_method arguments arg arg Assign Assign Call Try Assign"
  },
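The same temporary-monkeypatch pattern, written out on a toy module so the bind/restore steps are visible (this mirrors what the context manager does; it is not the context manager itself):

```python
import torch

class Toy(torch.nn.Module):
    def forward(self, x):
        return x + 1

def doubled_forward(self, x):
    return x * 2

m = Toy()
original = m.forward
m.forward = doubled_forward.__get__(m, Toy)  # bind the replacement to the instance
try:
    print(m(torch.tensor(3.0)))              # tensor(6.)
finally:
    m.forward = original                     # restore on exit
print(m(torch.tensor(3.0)))                  # tensor(4.)
```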
  {
    "library": "pytorch",
    "name": "apply_match",
    "source_code": "def apply_match(modules: dict[str, nn.ModuleDict], pattern: Union[tuple[Any], Any], node: Node, matched_node_pattern: list[Node]) -> Optional[list[Node]]:\n    if isinstance(pattern, tuple):\n        if len(pattern) == 1:\n            if _match(modules, node, pattern[0]):\n                return matched_node_pattern + [node]\n        first, *rest = pattern\n        if _match(modules, node, first):\n            if rest is None:\n                return matched_node_pattern + [node]\n            for user in node.users:\n                return apply_match(modules, tuple(rest), user, matched_node_pattern + [node])\n    elif _match(modules, node, pattern):\n        return [node]\n    return None",
    "docstring": "This function will return the matched nodes if the pattern matches the node given If there is no match, it will return None",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\match_utils.py",
    "ast_data": "FunctionDef name:apply_match arg:modules arg:pattern arg:node arg:matched_node_pattern arguments arg arg arg arg If Call If Compare Call If Call Return return:yes Assign If Call If Compare Return return:yes For Return return:yes Call Call If Call Return return:yes Return return:no"
  },
  {
    "library": "matplotlib",
    "name": "RendererTemplate",
    "source_code": "class RendererTemplate(RendererBase):\n\n    def __init__(self, dpi):\n        super().__init__()\n        self.dpi = dpi\n\n    def draw_path(self, gc, path, transform, rgbFace=None):\n        pass\n\n    def draw_image(self, gc, x, y, im):\n        pass\n\n    def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n        pass\n\n    def flipy(self):\n        return True\n\n    def get_canvas_width_height(self):\n        return (100, 100)\n\n    def get_text_width_height_descent(self, s, prop, ismath):\n        return (1, 1, 1)\n\n    def new_gc(self):\n        return GraphicsContextTemplate()\n\n    def points_to_pixels(self, points):\n        return points",
    "docstring": "The renderer handles drawing/rendering operations. This is a minimal do-nothing class that can be used to get started when writing a new backend. Refer to for documentation of the methods.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_template.py",
    "ast_data": "ClassDef name:RendererTemplate FunctionDef name:__init__ arg:self arg:dpi arguments arg arg Call Call Assign FunctionDef name:draw_path arg:self arg:gc arg:path arg:transform arg:rgbFace arguments arg arg arg arg arg FunctionDef name:draw_image arg:self arg:gc arg:x arg:y arg:im arguments arg arg arg arg arg FunctionDef name:draw_text arg:self arg:gc arg:x arg:y arg:s arg:prop arg:angle arg:ismath arg:mtext arguments arg arg arg arg arg arg arg arg arg FunctionDef name:flipy arg:self arguments arg Return return:yes FunctionDef name:get_canvas_width_height arg:self arguments arg Return return:yes FunctionDef name:get_text_width_height_descent arg:self arg:s arg:prop arg:ismath arguments arg arg arg arg Return return:yes FunctionDef name:new_gc arg:self arguments arg Return return:yes Call FunctionDef name:points_to_pixels arg:self arg:points arguments arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "op",
    "source_code": "@property\ndef op(self) -> ops.Operation:\n    return self.handle.op",
    "docstring": "The op for this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:op arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "rpartition",
    "source_code": "def rpartition(self, sep):\n    return asarray(rpartition(self, sep))",
    "docstring": "Partition each element in around . See Also -------- rpartition",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:rpartition arg:self arg:sep arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "are_long_distant_nodes",
    "source_code": "def are_long_distant_nodes(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode) -> bool:\n    proximity_score = max(abs(node1.min_order - node2.max_order), abs(node2.min_order - node1.max_order))\n    return proximity_score > 64",
    "docstring": "This function prevents fusion for nodes that can increase memory footprint. This problem is more common in horizontal fusion, where nodes that are far apart in the original order get fused, lengthening the live intervals of tensors. This is very evident in models with activation checkpointing, where the recomputed nodes from different checkpointed regions get fused and significantly increase the memory footprint. The current attempt is a quick, possibly hacky, heuristic to prevent the fusion of nodes that are far away in the original order. A better but difficult to implement heurisitic would be to use live intervals of the buffers, find region of peak pressure in the original program and prevent fusion that crosses that peak region. We might need special care or good approximation in this implementation, as fusion of node changes live intervals, and re-computing live intervals and peak memory after each fusion can introduce large compilation overhead.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\scheduler.py",
    "ast_data": "FunctionDef name:are_long_distant_nodes arg:self arg:node1 arg:node2 arguments arg arg arg Assign Call Call Call Return return:yes Compare"
  },
  {
    "library": "matplotlib",
    "name": "set_verts_and_codes",
    "source_code": "def set_verts_and_codes(self, verts, codes):\n    if len(verts) != len(codes):\n        raise ValueError(\"'codes' must be a 1D list or array with the same length of 'verts'\")\n    self._paths = [mpath.Path(xy, cds) if len(xy) else mpath.Path(xy) for xy, cds in zip(verts, codes)]\n    self.stale = True",
    "docstring": "Initialize vertices with path codes.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_verts_and_codes arg:self arg:verts arg:codes arguments arg arg arg If Compare Call Call Raise Call Assign Call Call Call Call Assign"
  },
  {
    "library": "pytorch",
    "name": "permute_tensor",
    "source_code": "def permute_tensor(self: torch.Tensor, src_dst: list[int], group: RANK_TYPES, tag: str='') -> torch.Tensor:\n    t, rankset, group_size = _expand_group(group, tag)\n    local_pg = c10d._find_or_create_pg_by_ranks_and_tag(t, rankset, group_size)\n    output_split_sizes = [0] * group_size\n    input_split_sizes = [0] * group_size\n    for src, dst in enumerate(src_dst):\n        if src == dist.get_rank(local_pg):\n            input_split_sizes[dst] = self.numel()\n        if dst == dist.get_rank(local_pg):\n            output_split_sizes[src] = self.numel()\n    return all_to_all_single(self, output_split_sizes, input_split_sizes, group, tag)",
    "docstring": "Permutes the elements of the tensor according to the given source/destination pairs. should be defined such that src_dst[m] == n means m sends to n. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:permute_tensor arg:self arg:src_dst arg:group arg:tag arguments arg arg arg arg Assign Call Assign Call Assign Assign For Call If Compare Call Assign Call If Compare Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "MissingOpProfile",
    "source_code": "class MissingOpProfile(RuntimeError):\n    pass",
    "docstring": "This is raised when we don't have an operator profile available for the given inputs.",
    "type": "class",
    "file_path": "pytorch\\torch\\_library\\fake_profile.py",
    "ast_data": "ClassDef name:MissingOpProfile"
  },
  {
    "library": "tensorflow",
    "name": "_variable_shape",
    "source_code": "@abc.abstractproperty\ndef _variable_shape(self):\n    pass",
    "docstring": "of , without batch dimension.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:_variable_shape arg:self arguments arg"
  },
  {
    "library": "scipy",
    "name": "isspmatrix_bsr",
    "source_code": "def isspmatrix_bsr(x):\n    return isinstance(x, bsr_matrix)",
    "docstring": "Is of a bsr_matrix type? Parameters ---------- x object to check for being a bsr matrix Returns ------- bool True if is a bsr matrix, False otherwise Examples -------- >>> from scipy.sparse import bsr_array, bsr_matrix, csr_matrix, isspmatrix_bsr >>> isspmatrix_bsr(bsr_matrix([[5]])) True >>> isspmatrix_bsr(bsr_array([[5]])) False >>> isspmatrix_bsr(csr_matrix([[5]])) False",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_bsr.py",
    "ast_data": "FunctionDef name:isspmatrix_bsr arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_y",
    "source_code": "def _check_y(y, multi_output=False, y_numeric=False, estimator=None):\n    if multi_output:\n        y = check_array(y, accept_sparse='csr', ensure_all_finite=True, ensure_2d=False, dtype=None, input_name='y', estimator=estimator)\n    else:\n        estimator_name = _check_estimator_name(estimator)\n        y = column_or_1d(y, warn=True)\n        _assert_all_finite(y, input_name='y', estimator_name=estimator_name)\n        _ensure_no_complex_data(y)\n    if y_numeric and hasattr(y.dtype, 'kind') and (y.dtype.kind == 'O'):\n        y = y.astype(np.float64)\n    return y",
    "docstring": "Isolated part of check_X_y dedicated to y validation",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_check_y arg:y arg:multi_output arg:y_numeric arg:estimator arguments arg arg arg arg If Assign Call Assign Call Assign Call Call Call If BoolOp Call Compare Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "projection",
    "source_code": "def projection(self, point: Tensor) -> Tensor:\n    return self.origin + self.direction @ (point - self.origin) * self.direction",
    "docstring": "Return the projection of a point onto the line. Args: point: the point to be projected.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\line.py",
    "ast_data": "FunctionDef name:projection arg:self arg:point arguments arg arg Return return:yes"
  },
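A quick numeric check of the projection formula `o + d * (d · (p - o))`, which assumes `direction` is unit length; here the line is the x-axis through the origin:

```python
import torch

origin = torch.tensor([0.0, 0.0])
direction = torch.tensor([1.0, 0.0])  # must be unit length
point = torch.tensor([2.0, 3.0])

proj = origin + direction @ (point - origin) * direction
print(proj)                           # tensor([2., 0.])
```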
  {
    "library": "matplotlib",
    "name": "_ensure_handler",
    "source_code": "@functools.cache\ndef _ensure_handler():\n    handler = logging.StreamHandler()\n    handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))\n    _log.addHandler(handler)\n    return handler",
    "docstring": "The first time this function is called, attach a using the same format as to the Matplotlib root logger. Return this handler every time this function is called.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:_ensure_handler arguments Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_WindowDataset",
    "source_code": "class _WindowDataset(dataset_ops.UnaryDataset):\n\n    def __init__(self, input_dataset, size, shift, stride, drop_remainder, name=None):\n        self._input_dataset = input_dataset\n        self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name='size')\n        self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name='shift')\n        self._stride = ops.convert_to_tensor(stride, dtype=dtypes.int64, name='stride')\n        self._drop_remainder = ops.convert_to_tensor(drop_remainder, dtype=dtypes.bool, name='drop_remainder')\n        self._structure = nest.pack_sequence_as(dataset_ops.get_legacy_output_classes(input_dataset), [dataset_ops.DatasetSpec(structure.convert_legacy_structure(output_type, output_shape, output_class)) for output_class, output_shape, output_type in zip(nest.flatten(dataset_ops.get_legacy_output_classes(input_dataset)), nest.flatten(dataset_ops.get_legacy_output_shapes(input_dataset)), nest.flatten(dataset_ops.get_legacy_output_types(input_dataset)))])\n        self._name = name\n        variant_tensor = gen_dataset_ops.window_dataset(input_dataset._variant_tensor, size=self._size, shift=self._shift, stride=self._stride, drop_remainder=self._drop_remainder, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._structure",
    "docstring": "A dataset that creates window datasets from the input elements.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\window_op.py",
    "ast_data": "ClassDef name:_WindowDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:size arg:shift arg:stride arg:drop_remainder arg:name arguments arg arg arg arg arg arg arg Assign Assign Call Assign Call Assign Call Assign Call Assign Call Call Call Call Call Call Call Call Call Call Call Assign Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes"
  },
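This class is what backs the public `Dataset.window` transformation; a usage sketch showing the datasets-of-datasets it yields:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(7).window(3, shift=2, drop_remainder=True)
for window in ds:  # each element is itself a Dataset
    print(list(window.as_numpy_iterator()))
# [0, 1, 2]
# [2, 3, 4]
# [4, 5, 6]
```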
  {
    "library": "tensorflow",
    "name": "_lookup",
    "source_code": "def _lookup(self, x=None, y=None, kwargs=None):\n    mapping = _Mapping(x=x, y=y, kwargs=kwargs)\n    if mapping.x is not None:\n        return self._from_x.get(mapping.x_key, mapping)\n    if mapping.y is not None:\n        return self._from_y.get(mapping.y_key, mapping)\n    return mapping",
    "docstring": "Helper which retrieves mapping info from forward/inverse dicts.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_lookup arg:self arg:x arg:y arg:kwargs arguments arg arg arg arg Assign Call If Compare Return return:yes Call If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ShearX",
    "source_code": "class ShearX(OperationBase):\n\n    @staticmethod\n    def _process_magnitude(magnitude: Tensor) -> Tensor:\n        return magnitude * 180\n\n    def __init__(self, initial_magnitude: Optional[float]=0.1, initial_probability: float=0.5, magnitude_range: Tuple[float, float]=(0.0, 0.3), temperature: float=0.1, symmetric_megnitude: bool=True) -> None:\n        if symmetric_megnitude and magnitude_range[0] < 0:\n            raise ValueError(f'Lower bound of {self.__class__.__name__} is a symmetric operation. The lower bound must above 0. Got {magnitude_range[0]}.')\n        super().__init__(K.RandomShear(magnitude_range, same_on_batch=False, p=initial_probability, align_corners=True), initial_magnitude=[('shear_x', initial_magnitude)], temperature=temperature, symmetric_megnitude=symmetric_megnitude, magnitude_fn=ShearX._process_magnitude)",
    "docstring": "Apply shear operation along x-axis. Args: initial_magnitude: the initial magnitude. initial_probability: the initial probability. If None, the augmentation will be randomly applied according to he augmentation sampling range. magnitude_range: the sampling range for random sampling and clamping the optimized magnitude. temperature: temperature for RelaxedBernoulli distribution used during training. symmetric_megnitude: if to randomly assign the magnitude as negative or not.",
    "type": "class",
    "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py",
    "ast_data": "ClassDef name:ShearX FunctionDef name:_process_magnitude arg:magnitude arguments arg Return return:yes FunctionDef name:__init__ arg:self arg:initial_magnitude arg:initial_probability arg:magnitude_range arg:temperature arg:symmetric_megnitude arguments arg arg arg arg arg arg If BoolOp Compare Raise Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_MatmulExtractingThreeDiagonals",
    "source_code": "def _MatmulExtractingThreeDiagonals(x, y_tr):\n    diag = math_ops.reduce_sum(x * y_tr, axis=-1)\n    if y_tr.shape.is_fully_defined():\n        zeros = array_ops.zeros(list(x.shape[:-2]) + [1, x.shape[-1]], dtype=x.dtype)\n        superdiag = math_ops.reduce_sum(x * array_ops.concat((y_tr[..., 1:, :], zeros), axis=-2), axis=-1)\n        subdiag = math_ops.reduce_sum(x * array_ops.concat((zeros, y_tr[..., :-1, :]), axis=-2), axis=-1)\n    else:\n        rank = array_ops.rank(y_tr)\n        zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)\n        superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1], [0, 0]])), axis=0)\n        superdiag = math_ops.reduce_sum(x * array_ops.pad(y_tr[..., 1:, :], superdiag_pad), axis=-1)\n        subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0], [0, 0]])), axis=0)\n        subdiag = math_ops.reduce_sum(x * array_ops.pad(y_tr[..., :-1, :], subdiag_pad), axis=-1)\n    return array_ops_stack.stack([superdiag, diag, subdiag], axis=-2)",
    "docstring": "Multiplies matrices and extracts three diagonals from the product. With sizes M x K and K x M, this function takes O(MK) time and O(M) space, while using math_ops.matmul, and then extracting the diagonals would take O(M^2 K) time and O(M^2) space. Args: x: first matrix y_tr: second matrix transposed Returns: Diagonals of the product in compact format (see linalg_ops.tridiagonal_solve)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg_grad.py",
    "ast_data": "FunctionDef name:_MatmulExtractingThreeDiagonals arg:x arg:y_tr arguments arg arg Assign Call If Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Call Return return:yes Call"
  },
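The trick in the statically-shaped branch can be verified in a few lines of NumPy: shifting the rows of `y_tr` aligns each output row with the super- or sub-diagonal entry, so a row-wise sum recovers each diagonal without forming the full product.

```python
import numpy as np

rng = np.random.default_rng(0)
M, K = 4, 5
x = rng.standard_normal((M, K))
y = rng.standard_normal((K, M))
y_tr = y.T                                     # (M, K), as the function expects

diag = np.sum(x * y_tr, axis=-1)               # (x @ y)[i, i]
zeros = np.zeros((1, K))
superdiag = np.sum(x * np.concatenate([y_tr[1:], zeros]), axis=-1)
subdiag = np.sum(x * np.concatenate([zeros, y_tr[:-1]]), axis=-1)

full = x @ y
print(np.allclose(diag, np.diag(full)))              # True
print(np.allclose(superdiag[:-1], np.diag(full, 1))) # True
print(np.allclose(subdiag[1:], np.diag(full, -1)))   # True
```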
  {
    "library": "django",
    "name": "disconnect",
    "source_code": "def disconnect(self, receiver=None, sender=None, dispatch_uid=None):\n    if dispatch_uid:\n        lookup_key = (dispatch_uid, _make_id(sender))\n    else:\n        lookup_key = (_make_id(receiver), _make_id(sender))\n    disconnected = False\n    with self.lock:\n        self._clear_dead_receivers()\n        for index in range(len(self.receivers)):\n            r_key, *_ = self.receivers[index]\n            if r_key == lookup_key:\n                disconnected = True\n                del self.receivers[index]\n                break\n        self.sender_receivers_cache.clear()\n    return disconnected",
    "docstring": "Disconnect receiver from sender for signal. If weak references are used, disconnect need not be called. The receiver will be removed from dispatch automatically. Arguments: receiver The registered receiver to disconnect. May be none if dispatch_uid is specified. sender The registered sender to disconnect dispatch_uid the unique identifier of the receiver to disconnect",
    "type": "method",
    "file_path": "django\\django\\dispatch\\dispatcher.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:receiver arg:sender arg:dispatch_uid arguments arg arg arg arg If Assign Call Assign Call Call Assign With Call For Call Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "float_operation",
    "source_code": "@staticmethod\ndef float_operation():\n    return {'max_depth': 10000, 'min_bytes': 0, 'min_micros': 0, 'min_params': 0, 'min_float_ops': 1, 'min_occurrence': 0, 'order_by': 'float_ops', 'account_type_regexes': ['.*'], 'start_name_regexes': ['.*'], 'trim_name_regexes': [], 'show_name_regexes': ['.*'], 'hide_name_regexes': [], 'account_displayed_op_only': True, 'select': ['float_ops'], 'step': -1, 'output': 'stdout'}",
    "docstring": "Options used to profile float operations. Please see on the caveats of calculating float operations. Returns: A dict of profiling options.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py",
    "ast_data": "FunctionDef name:float_operation arguments Return return:yes"
  },
  {
    "library": "pygame",
    "name": "as_machine_type",
    "source_code": "def as_machine_type(size):\n    if size == 32:\n        return 'x86'\n    if size == 64:\n        return 'x64'\n    raise ValueError('Unknown pointer size {}'.format(size))",
    "docstring": "Return pointer bit size as a Windows machine type",
    "type": "function",
    "file_path": "pygame\\buildconfig\\config_msys2.py",
    "ast_data": "FunctionDef name:as_machine_type arg:size arguments arg If Compare Return return:yes If Compare Return return:yes Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_acs_underlying_tensor",
    "source_code": "def _get_acs_underlying_tensor(self):\n    return self.elem",
    "docstring": "This method enables _functional_collectives_impl to test if a tensor is an ACS",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py",
    "ast_data": "FunctionDef name:_get_acs_underlying_tensor arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "typecode",
    "source_code": "def typecode(self):\n    return self._typecode",
    "docstring": "Return the typecode of the variable. Returns ------- typecode : char The character typecode of the variable (e.g., 'i' for int).",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\_netcdf.py",
    "ast_data": "FunctionDef name:typecode arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_assign_extended_slice_rebuild",
    "source_code": "def _assign_extended_slice_rebuild(self, start, stop, step, valueList):\n    indexList = range(start, stop, step)\n    if len(valueList) != len(indexList):\n        raise ValueError('attempt to assign sequence of size %d to extended slice of size %d' % (len(valueList), len(indexList)))\n    newLen = len(self)\n    newVals = dict(zip(indexList, valueList))\n\n    def newItems():\n        for i in range(newLen):\n            if i in newVals:\n                yield newVals[i]\n            else:\n                yield self._get_single_internal(i)\n    self._rebuild(newLen, newItems())",
    "docstring": "Assign an extended slice by rebuilding entire list",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\mutable_list.py",
    "ast_data": "FunctionDef name:_assign_extended_slice_rebuild arg:self arg:start arg:stop arg:step arg:valueList arguments arg arg arg arg arg Assign Call If Compare Call Call Raise Call Call Call Assign Call Assign Call Call FunctionDef name:newItems arguments For Call If Compare Call Call Call"
  },
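The size check mirrors what plain Python lists enforce for extended-slice assignment, which this helper replicates for the GEOS-backed mutable list:

```python
lst = list(range(6))
lst[::2] = ["a", "b", "c"]  # 3 targets, 3 values -> fine
print(lst)                  # ['a', 1, 'b', 3, 'c', 5]

try:
    lst[::2] = ["a", "b"]   # 3 targets, 2 values
except ValueError as e:
    print(e)  # attempt to assign sequence of size 2 to extended slice of size 3
```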
  {
    "library": "django",
    "name": "copy_exception",
    "source_code": "def copy_exception(exc, backend=None):\n    backend = backend or exc.backend\n    new = exc.__class__(*exc.args, tried=exc.tried, backend=backend, chain=exc.chain)\n    if hasattr(exc, 'template_debug'):\n        new.template_debug = exc.template_debug\n    return new",
    "docstring": "Create a new TemplateDoesNotExist. Preserve its declared attributes and template debug data but discard __traceback__, __context__, and __cause__ to make this object suitable for keeping around (in a cache, for example).",
    "type": "function",
    "file_path": "django\\django\\template\\backends\\django.py",
    "ast_data": "FunctionDef name:copy_exception arg:exc arg:backend arguments arg arg Assign BoolOp Assign Call If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "run",
    "source_code": "def run(self, fx_graph_module: torch.fx.GraphModule, onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph | None=None) -> onnxscript_graph_building.TorchScriptGraph:\n    if parent_onnxscript_graph is not None:\n        onnx_meta: _pass.GraphModuleOnnxMeta | None = fx_graph_module.meta.get('onnx')\n        if onnx_meta is None:\n            raise RuntimeError(f'ONNX meta is not found in submodule {fx_graph_module._get_name()}. Only submodules produced by `Modularize` pass is supported in ONNX export.')\n        onnx_domain = onnx_meta.package_info.to_onnx_domain_string()\n    else:\n        onnx_domain = None\n    onnxscript_graph = onnxscript_graph_building.TorchScriptGraph(parent_onnxscript_graph, domain_name=onnx_domain)\n    onnxscript_tracer = onnxscript_graph_building.TorchScriptTracingEvaluator(onnxscript_graph)\n    fx_name_to_onnxscript_value: dict[str, onnxscript_graph_building.TorchScriptTensor | tuple[onnxscript_graph_building.TorchScriptTensor, ...]] = {}\n    with torch.utils._mode_utils.no_dispatch():\n        for node in fx_graph_module.graph.nodes:\n            self.run_node(node, fx_graph_module, onnxfunction_dispatcher, onnxscript_graph, onnxscript_tracer, fx_name_to_onnxscript_value)\n    return onnxscript_graph",
    "docstring": "Analyze all FX nodes and trigger their ONNX translation. Args: fx_graph_module: FX graph module to be translated. onnxfunction_dispatcher: ONNX function dispatcher. parent_onnxscript_graph: The parent TorchScript graph. Must be provided if is a submodule. If not provided, is assumed to be the root module.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\fx_onnx_interpreter.py",
    "ast_data": "FunctionDef name:run arg:self arg:fx_graph_module arg:onnxfunction_dispatcher arg:parent_onnxscript_graph arguments arg arg arg arg If Compare Call If Compare Raise Call Call Assign Call Assign Assign Call Assign Call With Call For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "OptionalXlaContext",
    "source_code": "class OptionalXlaContext:\n\n    def __init__(self, is_compiled):\n        wrap = is_compiled and (not control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()))\n        self.xla_context = control_flow_ops.XLAControlFlowContext() if wrap else None\n\n    def __enter__(self):\n        if self.xla_context:\n            self.xla_context.Enter()\n\n    def __exit__(self, t, value, traceback):\n        if self.xla_context:\n            self.xla_context.Exit()",
    "docstring": "Wrapper for XLA context optionally applied under a context manager.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "ClassDef name:OptionalXlaContext FunctionDef name:__init__ arg:self arg:is_compiled arguments arg arg Assign BoolOp Call Call Assign Call FunctionDef name:__enter__ arg:self arguments arg If Call FunctionDef name:__exit__ arg:self arg:t arg:value arg:traceback arguments arg arg arg arg If Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_assert_dtype",
    "source_code": "def _maybe_assert_dtype(self, x):\n    if self.dtype is not None and self.dtype.base_dtype != x.dtype.base_dtype:\n        raise TypeError('Input had dtype %s but expected %s.' % (self.dtype, x.dtype))",
    "docstring": "Helper to check dtype when self.dtype is known.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py",
    "ast_data": "FunctionDef name:_maybe_assert_dtype arg:self arg:x arguments arg arg If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "set_edgecolor",
    "source_code": "def set_edgecolor(self, c):\n    if isinstance(c, str) and c.lower() in ('none', 'face'):\n        c = c.lower()\n    self._original_edgecolor = c\n    self._set_edgecolor(c)",
    "docstring": "Set the edgecolor(s) of the collection. Parameters ---------- c : :mpltype: or list of :mpltype: or 'face' The collection edgecolor(s). If a sequence, the patches cycle through it. If 'face', match the facecolor.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:set_edgecolor arg:self arg:c arguments arg arg If BoolOp Call Compare Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "_build_and_solve_system",
    "source_code": "def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):\n    lhs, rhs, shift, scale = _build_system(y, d, smoothing, kernel, epsilon, powers)\n    _, _, coeffs, info = dgesv(lhs, rhs, overwrite_a=True, overwrite_b=True)\n    if info < 0:\n        raise ValueError(f'The {-info}-th argument had an illegal value.')\n    elif info > 0:\n        msg = 'Singular matrix.'\n        nmonos = powers.shape[0]\n        if nmonos > 0:\n            pmat = _polynomial_matrix((y - shift) / scale, powers)\n            rank = np.linalg.matrix_rank(pmat)\n            if rank < nmonos:\n                msg = f'Singular matrix. The matrix of monomials evaluated at the data point coordinates does not have full column rank ({rank}/{nmonos}).'\n        raise LinAlgError(msg)\n    return (shift, scale, coeffs)",
    "docstring": "Build and solve the RBF interpolation system of equations. Parameters ---------- y : (P, N) float ndarray Data point coordinates. d : (P, S) float ndarray Data values at . smoothing : (P,) float ndarray Smoothing parameter for each data point. kernel : str Name of the RBF. epsilon : float Shape parameter. powers : (R, N) int ndarray The exponents for each monomial in the polynomial. Returns ------- coeffs : (P + R, S) float ndarray Coefficients for each RBF and monomial. shift : (N,) float ndarray Domain shift used to create the polynomial matrix. scale : (N,) float ndarray Domain scaling used to create the polynomial matrix.",
    "type": "function",
    "file_path": "scipy\\scipy\\interpolate\\_rbfinterp.py",
    "ast_data": "FunctionDef name:_build_and_solve_system arg:y arg:d arg:smoothing arg:kernel arg:epsilon arg:powers arguments arg arg arg arg arg arg Assign Call Assign Call If Compare Raise Call If Compare Assign Assign If Compare Assign Call Assign Call If Compare Assign Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "send_object_list",
    "source_code": "@_exception_logger\ndef send_object_list(object_list: list[Any], dst: Optional[int]=None, group: Optional[ProcessGroup]=None, device: Optional[torch.device]=None, group_dst: Optional[int]=None):\n    group = _group_or_default_group(group)\n    group_dst = _canonicalize_group_rank(group, dst, group_dst)\n    _check_not_self_rank(group, group_dst, 'destination')\n    if _rank_not_in_group(group):\n        _warn_not_in_group('send_object_list')\n        return\n    current_device = device or _get_object_coll_device(group)\n    tensor_list, size_list = zip(*[_object_to_tensor(obj, current_device, group) for obj in object_list])\n    object_sizes_tensor = torch.cat(size_list)\n    send(object_sizes_tensor, group_dst=group_dst, group=group)\n    if len(tensor_list) == 1:\n        object_tensor = tensor_list[0]\n    else:\n        object_tensor = torch.cat(tensor_list)\n    send(object_tensor, group_dst=group_dst, group=group)",
    "docstring": "Sends picklable objects in `sendobject_collectivessend_object_listsend_object_listsend` instead. Example:: >>> # xdoctest: +SKIP(\"need process group init\") >>> # Note: Process group initialization omitted on each rank. >>> import torch.distributed as dist >>> # Assumes backend is not NCCL >>> device = torch.device(\"cpu\") >>> if dist.get_rank() == 0: >>> # Assumes world_size of 2. >>> objects = [\"foo\", 12, {1: 2}] # any picklable object >>> dist.send_object_list(objects, dst=1, device=device) >>> else: >>> objects = [None, None, None] >>> dist.recv_object_list(objects, src=0, device=device) >>> objects ['foo', 12, {1: 2}]",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:send_object_list arg:object_list arg:dst arg:group arg:device arg:group_dst arguments arg arg arg arg arg Assign Call Assign Call Call If Call Call Return return:no Assign BoolOp Call Assign Call Call Assign Call Call If Compare Call Assign Assign Call Call"
  },
  {
    "library": "pandas",
    "name": "_copy",
    "source_code": "def _copy(self, deepcopy: bool=False) -> Styler:\n    styler = type(self)(self.data)\n    shallow = ['hide_index_', 'hide_columns_', 'hide_column_names', 'hide_index_names', 'table_attributes', 'cell_ids', 'caption', 'uuid', 'uuid_len', 'template_latex', 'template_html_style', 'template_html_table', 'template_html']\n    deep = ['css', 'concatenated', '_display_funcs', '_display_funcs_index', '_display_funcs_columns', '_display_funcs_index_names', '_display_funcs_column_names', 'hidden_rows', 'hidden_columns', 'ctx', 'ctx_index', 'ctx_columns', 'cell_context', '_todo', 'table_styles', 'tooltips']\n    for attr in shallow:\n        setattr(styler, attr, getattr(self, attr))\n    for attr in deep:\n        val = getattr(self, attr)\n        setattr(styler, attr, copy.deepcopy(val) if deepcopy else val)\n    return styler",
    "docstring": "Copies a Styler, allowing for deepcopy or shallow copy Copying a Styler aims to recreate a new Styler object which contains the same data and styles as the original. Data dependent attributes [copied and NOT exported]: - formatting (._display_funcs) - hidden index values or column values (.hidden_rows, .hidden_columns) - tooltips - cell_context (cell css classes) - ctx (cell css styles) - caption - concatenated stylers Non-data dependent attributes [copied and exported]: - css - hidden index state and hidden columns state (.hide_index_, .hide_columns_) - table_attributes - table_styles - applied styles (_todo)",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\style.py",
    "ast_data": "FunctionDef name:_copy arg:self arg:deepcopy arguments arg arg Assign Call Call Assign Assign For Call Call For Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "implicit_replication",
    "source_code": "@contextmanager\ndef implicit_replication() -> Iterator[None]:\n    try:\n        DTensor._op_dispatcher._allow_implicit_replication = True\n        yield\n    finally:\n        DTensor._op_dispatcher._allow_implicit_replication = False",
    "docstring": "This context manager allows :class: to implicitly treat all non-DTensors (`DTensor` s are not replicated in practice, please use it at your discretion.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\__init__.py",
    "ast_data": "FunctionDef name:implicit_replication arguments Try Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "unwrap_output_dict",
    "source_code": "def unwrap_output_dict(strategy, grouped_outputs, mode):\n    if mode == ModeKeys.PREDICT:\n        return flatten_per_replica_values(strategy, grouped_outputs)\n    total_loss = strategy.reduce(reduce_util.ReduceOp.SUM, grouped_outputs['total_loss'][0], axis=None)\n    output_losses = flatten_per_replica_values(strategy, grouped_outputs['output_losses'])\n    metrics = flatten_per_replica_values(strategy, grouped_outputs['metrics'])\n    batch_size = strategy.reduce(reduce_util.ReduceOp.SUM, grouped_outputs['batch_size'], axis=None)\n    if backend.is_tpu_strategy(strategy) and ops.executing_eagerly_outside_functions():\n        output_losses = output_losses[::strategy.num_replicas_in_sync]\n        metrics = metrics[::strategy.num_replicas_in_sync]\n    return {'total_loss': [total_loss], 'output_losses': output_losses, 'metrics': metrics, 'batch_size': batch_size}",
    "docstring": "Unwrap the list of outputs contained in the PerReplica parameters.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:unwrap_output_dict arg:strategy arg:grouped_outputs arg:mode arguments arg arg arg If Compare Return return:yes Call Assign Call Assign Call Assign Call Assign Call If BoolOp Call Call Assign Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "create_valid_python_identifier",
    "source_code": "def create_valid_python_identifier(name: str) -> str:\n    if name.isidentifier() and (not iskeyword(name)):\n        return name\n    gen = ((c, ''.join((chr(b) for b in c.encode('ascii', 'backslashreplace')))) for c in name)\n    name = ''.join((c_escaped.replace('\\\\', '_UNICODE_' if c != c_escaped else '_BACKSLASH_') for c, c_escaped in gen))\n    special_characters_replacements = {char: f'_{token.tok_name[tokval]}_' for char, tokval in tokenize.EXACT_TOKEN_TYPES.items()}\n    special_characters_replacements.update({' ': '_', '?': '_QUESTIONMARK_', '!': '_EXCLAMATIONMARK_', '$': '_DOLLARSIGN_', '€': '_EUROSIGN_', '°': '_DEGREESIGN_', \"'\": '_SINGLEQUOTE_', '\"': '_DOUBLEQUOTE_', '#': '_HASH_', '`': '_BACKTICK_'})\n    name = ''.join([special_characters_replacements.get(char, char) for char in name])\n    name = f'BACKTICK_QUOTED_STRING_{name}'\n    if not name.isidentifier():\n        raise SyntaxError(f\"Could not convert '{name}' to a valid Python identifier.\")\n    return name",
    "docstring": "Create valid Python identifiers from any string. Check if name contains any special characters. If it contains any special characters, the special characters will be replaced by a special string and a prefix is added. Raises ------ SyntaxError If the returned name is not a Python valid identifier, raise an exception.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\parsing.py",
    "ast_data": "FunctionDef name:create_valid_python_identifier arg:name arguments arg If BoolOp Call Call Return return:yes Assign Call Call Call Assign Call Call Compare Assign Call Call Assign Call Call Assign If Call Raise Call Return return:yes"
  },
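A sketch of the substitutions the function performs (the import path is pandas-internal and may move between versions): spaces become underscores, punctuation becomes named tokens, and the prefix marks the name as backtick-quoted.

```python
# Internal pandas helper; import path may vary across versions.
from pandas.core.computation.parsing import create_valid_python_identifier

print(create_valid_python_identifier("my column!"))
# BACKTICK_QUOTED_STRING_my_column_EXCLAMATIONMARK_
```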
  {
    "library": "matplotlib",
    "name": "_shared_setter",
    "source_code": "def _shared_setter(self, prop, val):\n    setattr(self, f'_{prop}', val)\n    artist.setp([self._rectangle, *self._connectors], prop, val)",
    "docstring": "Helper function to set the same style property on the artist and its children.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\inset.py",
    "ast_data": "FunctionDef name:_shared_setter arg:self arg:prop arg:val arguments arg arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "aps06_f",
    "source_code": "def aps06_f(x, n):\n    return 2 * x * np.exp(-n) - 2 * np.exp(-n * x) + 1",
    "docstring": "Exponential rapidly changing from -1 to 1 at x=0",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_tstutils.py",
    "ast_data": "FunctionDef name:aps06_f arg:x arg:n arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "GetControlPivot",
    "source_code": "def GetControlPivot(self):\n    return None",
    "docstring": "Returns the pivot node for this context, or None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:GetControlPivot arg:self arguments arg Return return:no"
  },
  {
    "library": "django",
    "name": "_check_formset",
    "source_code": "def _check_formset(self, obj):\n    if not _issubclass(obj.formset, BaseModelFormSet):\n        return must_inherit_from(parent='BaseModelFormSet', option='formset', obj=obj, id='admin.E206')\n    else:\n        return []",
    "docstring": "Check formset is a subclass of BaseModelFormSet.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_formset arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The dtype of all s in this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_gen_gradient_func",
    "source_code": "def _gen_gradient_func(func):\n\n    def gradient_func(unused_op, *result_grads):\n        result_grads = [x if x is not None else default_gradient.zeros_like(t) for x, t in zip(result_grads, func.graph.inputs)]\n        return func(*result_grads)\n    return gradient_func",
    "docstring": "Wraps a ConcreteFunction to be compatible with the gradient registry.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\core\\function\\transform\\transform.py",
    "ast_data": "FunctionDef name:_gen_gradient_func arg:func arguments arg FunctionDef name:gradient_func arg:unused_op arguments arg arg Assign Compare Call Call Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_var_to_const_function_in_v1",
    "source_code": "def convert_var_to_const_function_in_v1(func, lower_control_flow=True, aggressive_inlining=False):\n    session = ops.get_default_session()\n    if session is None:\n        raise RuntimeError('The conversion must be carried out in a Session context.')\n    converter_data = _FunctionConverterDataInGraph(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining, session=session)\n    output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n    return _construct_concrete_function(func, output_graph_def, converted_input_indices)",
    "docstring": "Replaces all the variables in a graph with constants of the same values. This function works as same as convert_variables_to_constants_v2, but it should be used in Graph mode. It is a temporary solution when users want to integrate their models written in TF2 with infra that requires TF1 mode. The current implementation only works for graphs that do not contain any control flow or embedding related ops. The function must be called in a Session context. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops, not properly connected to control outputs). (default False) Raises: RuntimeError: If no Session context is present. Returns: ConcreteFunction containing a simplified version of the original.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_var_to_const_function_in_v1 arg:func arg:lower_control_flow arg:aggressive_inlining arguments arg arg arg Assign Call If Compare Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "SessionRunValues",
    "source_code": "@tf_export(v1=['train.SessionRunValues'])\nclass SessionRunValues(collections.namedtuple('SessionRunValues', ['results', 'options', 'run_metadata'])):\n    pass",
    "docstring": "Contains the results of . In the future we may use this object to add more information about result of run without changing the Hook API. Args: results: The return values from corresponding to the fetches attribute returned in the RunArgs. Note that this has the same shape as the RunArgs fetches. For example: fetches = global_step_tensor => results = nparray(int) fetches = [train_op, summary_op, global_step_tensor] => results = [None, nparray(string), nparray(int)] fetches = {'step': global_step_tensor, 'summ': summary_op} => results = {'step': nparray(int), 'summ': nparray(string)} options: from the call. run_metadata: from the call.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py",
    "ast_data": "ClassDef name:SessionRunValues Call Call"
  },
  {
    "library": "cherrypy",
    "name": "clear",
    "source_code": "def clear(self):\n    raise NotImplementedError",
    "docstring": "Reset the cache to its initial, empty state.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\caching.py",
    "ast_data": "FunctionDef name:clear arg:self arguments arg Raise"
  },
  {
    "library": "pandas",
    "name": "setitem",
    "source_code": "def setitem(self, indexer, value) -> Self:\n    if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:\n        raise ValueError(f'Cannot set values with ndim > {self.ndim}')\n    if not self._has_no_reference(0):\n        if self.ndim == 2 and isinstance(indexer, tuple):\n            blk_loc = self.blklocs[indexer[1]]\n            if is_list_like(blk_loc) and blk_loc.ndim == 2:\n                blk_loc = np.squeeze(blk_loc, axis=0)\n            elif not is_list_like(blk_loc):\n                blk_loc = [blk_loc]\n            if len(blk_loc) == 0:\n                return self.copy(deep=False)\n            values = self.blocks[0].values\n            if values.ndim == 2:\n                values = values[blk_loc]\n                self._iset_split_block(0, blk_loc, values)\n                self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value)\n                return self\n        self = self.copy()\n    return self.apply('setitem', indexer=indexer, value=value)",
    "docstring": "Set values with indexer. For SingleBlockManager, this backs s[indexer] = value",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\internals\\managers.py",
    "ast_data": "FunctionDef name:setitem arg:self arg:indexer arg:value arguments arg arg arg If BoolOp Call Compare Raise Call If Call If BoolOp Compare Call Assign If BoolOp Call Compare Assign Call If Call Assign If Compare Call Return return:yes Call Assign If Compare Assign Call Call Call Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "__init__",
    "source_code": "def __init__(self, a, b):\n    if a > b:\n        a, b = (b, a)\n    self.a = a\n    self.b = b",
    "docstring": "domain_check_interval(a,b)(x) = true where x b",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg If Compare Assign Assign Assign"
  },
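The interval check itself is simple enough to restate outside numpy.ma; a standalone sketch of what I understand the class (together with its __call__) to compute, for illustration only:

```python
import numpy as np

class DomainCheckInterval:
    """Standalone sketch of numpy.ma's internal interval domain check."""

    def __init__(self, a, b):
        if a > b:  # normalize so that a <= b, as in the entry above
            a, b = b, a
        self.a, self.b = a, b

    def __call__(self, x):
        # True where x falls outside [a, b], i.e. where the domain is violated.
        return np.logical_or(np.greater(x, self.b), np.less(x, self.a))

check = DomainCheckInterval(1.0, 0.0)  # swapped arguments are normalized
print(check(np.array([-0.5, 0.5, 2.0])))  # [ True False  True]
```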
  {
    "library": "tensorflow",
    "name": "main_op_with_restore",
    "source_code": "@tf_export(v1=['saved_model.main_op_with_restore', 'saved_model.main_op.main_op_with_restore'])\n@deprecation.deprecated(None, _DEPRECATION_MSG)\ndef main_op_with_restore(restore_op_name):\n    with ops.control_dependencies([main_op()]):\n        main_op_with_restore = control_flow_ops.group(restore_op_name)\n    return main_op_with_restore",
    "docstring": "Returns a main op to init variables, tables and restore the graph. Returns the main op including the group of ops that initializes all variables, initialize local variables, initialize all tables and the restore op name. Args: restore_op_name: Name of the op to use to restore the graph. Returns: The set of ops to be run as part of the main op upon the load operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\main_op_impl.py",
    "ast_data": "FunctionDef name:main_op_with_restore arg:restore_op_name arguments arg With Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_unbacked_replacements",
    "source_code": "def _get_unbacked_replacements(self) -> dict[Expr, Expr]:\n    if self.unbacked_replacements is not None:\n        return self.unbacked_replacements\n    self.unbacked_replacements = {}\n    for assertions in self.shape_env.deferred_runtime_asserts.values():\n        for assertion in assertions:\n            if not isinstance(assertion.expr, sympy.Equality):\n                continue\n            lhs, rhs = (assertion.expr.lhs, assertion.expr.rhs)\n            l2r = lhs.compare(rhs) == 1\n            src = lhs if l2r else rhs\n            dst = rhs if l2r else lhs\n            existing_replacement = self.unbacked_replacements.get(src, None)\n            if existing_replacement and isinstance(existing_replacement, sympy.Symbol):\n                continue\n            self.unbacked_replacements[src] = dst\n    return self.unbacked_replacements",
    "docstring": "This helps with covering unbacked symint cases where you may have two expressions: s0 + u0 and u1. And s0 + u0 is known to be equal to u1 via deferred_runtime_asserts. For example in atomically_apply_size_hint, it must return the same size hint for both s0 + u0 and u1, but it first needs to know they are equal. Then it can substitute s0 + u0 for u1.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:_get_unbacked_replacements arg:self arguments arg If Compare Return return:yes Assign For Call For If Call Assign Assign Compare Call Assign Assign Assign Call If BoolOp Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_is_level_reference",
    "source_code": "@final\ndef _is_level_reference(self, key: Level, axis: Axis=0) -> bool:\n    axis_int = self._get_axis_number(axis)\n    return key is not None and is_hashable(key) and (key in self.axes[axis_int].names) and (not self._is_label_reference(key, axis=axis_int))",
    "docstring": "Test whether a key is a level reference for a given axis. To be considered a level reference, must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\generic.py",
    "ast_data": "FunctionDef name:_is_level_reference arg:self arg:key arg:axis arguments arg arg arg Assign Call Return return:yes BoolOp Compare Call Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "event_size",
    "source_code": "@property\ndef event_size(self):\n    return self._event_size",
    "docstring": "Scalar tensor: the number of classes.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\categorical.py",
    "ast_data": "FunctionDef name:event_size arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "prob",
    "source_code": "def prob(self, value, name='prob'):\n    return self._call_prob(value, name)",
    "docstring": "Probability density/mass function. Args: value: or . name: Python prepended to names of ops created by this function. Returns: prob: a of shape with values of type .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:prob arg:self arg:value arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "best_probas_and_indices",
    "source_code": "def best_probas_and_indices(class_probas: Any) -> str:\n    probas_indices_sorted = sorted([(proba, index) for index, proba in enumerate(class_probas) if proba > 0], key=lambda x: x[0], reverse=True)\n    probas_indices_sorted_str = ', '.join((f'({value:.3f}, {index})' for value, index in probas_indices_sorted))\n    return f'[{probas_indices_sorted_str}]'",
    "docstring": "Given a list of tuples (proba, idx), this function returns a string in which the tuples are sorted by proba in descending order. E.g.: Given class_probas=[(0.3, 0), (0.5, 1), (0.2, 2)] this function returns \"[(0.5, 1), (0.3, 0), (0.2, 2)]\"",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\ah_tree.py",
    "ast_data": "FunctionDef name:best_probas_and_indices arg:class_probas arguments arg Assign Call Call Compare arguments arg Assign Call Return return:yes"
  },
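Because the helper is pure Python, it is easy to verify standalone; a restatement with the docstring's example (same logic, minor reformatting only):

```python
def best_probas_and_indices(class_probas):
    # Keep only positive probabilities, pair each with its class index,
    # and sort by probability in descending order.
    pairs = sorted(
        ((proba, index) for index, proba in enumerate(class_probas) if proba > 0),
        key=lambda x: x[0],
        reverse=True,
    )
    return "[" + ", ".join(f"({value:.3f}, {index})" for value, index in pairs) + "]"

print(best_probas_and_indices([0.3, 0.5, 0.2]))
# -> [(0.500, 1), (0.300, 0), (0.200, 2)]
```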
  {
    "library": "pytorch",
    "name": "is_structseq_instance",
    "source_code": "def is_structseq_instance(obj: object) -> bool:\n    return is_structseq_class(type(obj))",
    "docstring": "Return whether the object is an instance of PyStructSequence.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:is_structseq_instance arg:obj arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_no_new_objects",
    "source_code": "@trace.trace_wrapper\ndef assert_no_new_objects(self, threshold=None):\n    if not threshold:\n        threshold = {}\n    count_diff = self._snapshot_diff(0, -1)\n    original_count_diff = copy.deepcopy(count_diff)\n    count_diff.subtract(collections.Counter(threshold))\n    if max(count_diff.values() or [0]) > 0:\n        raise AssertionError(f'New Python objects created exceeded the threshold.\\nPython object threshold:\\n{threshold}\\n\\nNew Python objects:\\n{original_count_diff.most_common()}')\n    elif min(count_diff.values(), default=0) < 0:\n        logging.warning(f'New Python objects created were less than the threshold.\\nPython object threshold:\\n{threshold}\\n\\nNew Python objects:\\n{original_count_diff.most_common()}')",
    "docstring": "Assert no new Python objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\python_memory_checker.py",
    "ast_data": "FunctionDef name:assert_no_new_objects arg:self arg:threshold arguments arg arg If Assign Assign Call Assign Call Call Call If Compare Call BoolOp Call Raise Call Call If Compare Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_rotation",
    "source_code": "def get_rotation(self):\n    if self.get_transform_rotates_text():\n        return self.get_transform().transform_angles([self._rotation], [self.get_unitless_position()]).item(0)\n    else:\n        return self._rotation",
    "docstring": "Return the text angle in degrees between 0 and 360.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_rotation arg:self arguments arg If Call Return return:yes Call Call Call Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_quantile_to_level",
    "source_code": "def _quantile_to_level(self, data, quantile):\n    isoprop = np.asarray(quantile)\n    values = np.ravel(data)\n    sorted_values = np.sort(values)[::-1]\n    normalized_values = np.cumsum(sorted_values) / values.sum()\n    idx = np.searchsorted(normalized_values, 1 - isoprop)\n    levels = np.take(sorted_values, idx, mode='clip')\n    return levels",
    "docstring": "Return data levels corresponding to quantile cuts of mass.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\distributions.py",
    "ast_data": "FunctionDef name:_quantile_to_level arg:self arg:data arg:quantile arguments arg arg arg Assign Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Return return:yes"
  },
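The level computation is plain NumPy, so its behaviour can be checked without seaborn; a small sketch with arbitrary made-up densities:

```python
import numpy as np

def quantile_to_level(data, quantile):
    # Mirror of the helper above: walk densities from highest to lowest and
    # find the level at which the cumulative mass reaches 1 - quantile.
    isoprop = np.asarray(quantile)
    values = np.ravel(data)
    sorted_values = np.sort(values)[::-1]
    normalized = np.cumsum(sorted_values) / values.sum()
    idx = np.searchsorted(normalized, 1 - isoprop)
    return np.take(sorted_values, idx, mode="clip")

density = np.array([0.05, 0.1, 0.15, 0.3, 0.4])
print(quantile_to_level(density, 0.5))  # 0.3 -- the top two cells hold ~70% of mass
```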
  {
    "library": "pytorch",
    "name": "collapse_resume_frames",
    "source_code": "def collapse_resume_frames(stack: StackSummary) -> StackSummary:\n    new_stack = StackSummary()\n    for frame in stack:\n        if frame.filename is None:\n            continue\n        name = remove_resume_prefix(frame.name)\n        if new_stack and name and (new_stack[-1].name == name):\n            new_stack[-1] = frame\n            frame.name = name\n        else:\n            new_stack.append(frame)\n    return new_stack",
    "docstring": "When we graph break, we create a resume function and make a regular Python call to it, which gets intercepted by Dynamo. This behavior is normally shown in the traceback, which can be confusing to a user. So we can filter out resume frames for better traceback clarity. Example: File \"...\" line 3, in f File \"...\" line 5, in torch_dynamo_resume_in_f_at_80 File \"...\" line 10, in torch_dynamo_resume_in_f_at_120 becomes File \"...\" line 10, in f",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\exc.py",
    "ast_data": "FunctionDef name:collapse_resume_frames arg:stack arguments arg Assign Call For If Compare Assign Call If BoolOp Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "LinearGeometryMixin",
    "source_code": "class LinearGeometryMixin:\n\n    def interpolate(self, distance):\n        return self._topology(capi.geos_interpolate(self.ptr, distance))\n\n    def interpolate_normalized(self, distance):\n        return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))\n\n    def project(self, point):\n        from .point import Point\n        if not isinstance(point, Point):\n            raise TypeError('locate_point argument must be a Point')\n        return capi.geos_project(self.ptr, point.ptr)\n\n    def project_normalized(self, point):\n        from .point import Point\n        if not isinstance(point, Point):\n            raise TypeError('locate_point argument must be a Point')\n        return capi.geos_project_normalized(self.ptr, point.ptr)\n\n    @property\n    def merged(self):\n        return self._topology(capi.geos_linemerge(self.ptr))\n\n    @property\n    def closed(self):\n        return capi.geos_isclosed(self.ptr)",
    "docstring": "Used for LineString and MultiLineString.",
    "type": "class",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "ClassDef name:LinearGeometryMixin FunctionDef name:interpolate arg:self arg:distance arguments arg arg Return return:yes Call Call FunctionDef name:interpolate_normalized arg:self arg:distance arguments arg arg Return return:yes Call Call FunctionDef name:project arg:self arg:point arguments arg arg If Call Raise Call Return return:yes Call FunctionDef name:project_normalized arg:self arg:point arguments arg arg If Call Raise Call Return return:yes Call FunctionDef name:merged arg:self arguments arg Return return:yes Call Call FunctionDef name:closed arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_to_row_partitions_and_nvals_from_lengths",
    "source_code": "def _to_row_partitions_and_nvals_from_lengths(lengths: Sequence[Union[int, Sequence[int]]], dtype=None) -> Tuple[Sequence[RowPartition], int]:\n    size_so_far = lengths[0]\n    result = []\n    for current_lengths in lengths[1:]:\n        if isinstance(current_lengths, int):\n            nrows = size_so_far\n            nvals = current_lengths * nrows\n            size_so_far = nvals\n            result.append(RowPartition.from_uniform_row_length(current_lengths, nvals, nrows=nrows, dtype_hint=dtype))\n        else:\n            if size_so_far != len(current_lengths):\n                raise ValueError('Shape not consistent.')\n            result.append(RowPartition.from_row_lengths(current_lengths, dtype_hint=dtype))\n            size_so_far = sum(current_lengths)\n    return (result, size_so_far)",
    "docstring": "Allow ragged and uniform shapes to be specified. For example, [2, [2,1], 2] represents a shape like: [[[0, 0], [0, 0]], [[0, 0]]] Args: lengths: a list of integers and lists of integers. dtype: dtype of the shape (tf.int32 or tf.int64) Returns: a sequence of RowPartitions, and the number of values of the last partition.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:_to_row_partitions_and_nvals_from_lengths arg:lengths arg:dtype arguments arg arg Assign Assign For If Call Assign Assign Assign Call Call If Compare Call Raise Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvReLU2d",
    "source_code": "class ConvReLU2d(nnqat.Conv2d, nni._FusedModule):\n    _FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvReLU2d\n    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv2d]] = nn.Conv2d\n    _FLOAT_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = None\n    _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.ReLU\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', qconfig=None):\n        super().__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, padding_mode=padding_mode, qconfig=qconfig)\n        assert qconfig, 'qconfig must be provided for QAT module'\n        self.qconfig = qconfig\n        self.weight_fake_quant = self.qconfig.weight()\n\n    def forward(self, input):\n        return F.relu(self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias))\n\n    @classmethod\n    def from_float(cls, mod, use_precomputed_fake_quant=False):\n        return super().from_float(mod, use_precomputed_fake_quant=use_precomputed_fake_quant)",
    "docstring": "A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with FakeQuantize modules for weight for quantization aware training. We combined the interface of :class: and :class:. Attributes: weight_fake_quant: fake quant module for weight",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "ClassDef name:ConvReLU2d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Call FunctionDef name:forward arg:self arg:input arguments arg arg Return return:yes Call Call Call FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_case_helper",
    "source_code": "def _case_helper(cond_fn, pred_fn_pairs, default, exclusive, name, allow_python_preds=False, **cond_kwargs):\n    predicates, actions = _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name, allow_python_preds)\n    with ops.name_scope(name, 'case', [predicates]):\n        if default is None:\n            default, predicates, actions = _case_create_default_action(predicates, actions)\n        fn = default\n        for predicate, action in reversed(list(zip(predicates, actions))):\n            fn = functools.partial(cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs)\n        if exclusive:\n            with ops.control_dependencies([_assert_at_most_n_true(predicates, n=1, msg='Input error: exclusive=True')]):\n                return fn()\n        else:\n            return fn()",
    "docstring": "Implementation of case that allows for different cond functions. Args: cond_fn: method that has signature and semantics of above. pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to . name: A name for this operation (optional). allow_python_preds: if true, pred_fn_pairs may contain Python bools in addition to boolean Tensors **cond_kwargs: keyword arguments that will be passed to . Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by if none does. Raises: TypeError: If is not a list/dictionary. TypeError: If is a list but does not contain 2-tuples. TypeError: If is not callable for any i, or is not callable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_case.py",
    "ast_data": "FunctionDef name:_case_helper arg:cond_fn arg:pred_fn_pairs arg:default arg:exclusive arg:name arg:allow_python_preds arguments arg arg arg arg arg arg arg Assign Call With Call If Compare Assign Call Assign For Call Call Call Assign Call If With Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_symintnode",
    "source_code": "@record_shapeenv_event()\ndef create_symintnode(self, sym: sympy.Expr, *, hint: Optional[int], source: Optional[Source]=None) -> IntLikeType:\n    if self._translation_validation_enabled and source is not None:\n        symbol = self._create_symbol_for_source(source)\n        assert symbol is not None\n        fx_node = self._create_fx_placeholder_and_z3var(symbol, int)\n        self._add_assertion(sympy.Eq(symbol, sym))\n    else:\n        fx_node = None\n    out: IntLikeType\n    if isinstance(sym, sympy.Integer):\n        if hint is not None:\n            assert int(sym) == hint\n        out = int(sym)\n    else:\n        if free_unbacked_symbols(sym):\n            hint = None\n        out = SymInt(SymNode(sym, self, int, hint, fx_node=fx_node))\n    return out",
    "docstring": "Create a SymInt value from a symbolic expression If you know what the current hint value of the SymInt to be created is, pass it into hint. Otherwise, pass None and we will make our best guess",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:create_symintnode arg:self arg:sym arguments arg arg arg arg If BoolOp Compare Assign Call Compare Assign Call Call Call Assign If Call If Compare Compare Call Assign Call If Call Assign Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_linop_solve",
    "source_code": "def _linop_solve(self, left_operator: 'LinearOperatorInversion', right_operator: linear_operator.LinearOperator) -> linear_operator.LinearOperator:\n    return left_operator.operator.matmul(right_operator)",
    "docstring": "Solve inverse of generic s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_inversion.py",
    "ast_data": "FunctionDef name:_linop_solve arg:self arg:left_operator arg:right_operator arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_NodeState",
    "source_code": "class _NodeState(object):\n\n    def __init__(self, init_from=None):\n        if init_from:\n            self.value = set(init_from)\n        else:\n            self.value = set()\n\n    def __eq__(self, other):\n        return self.value == other.value\n\n    def __ne__(self, other):\n        return self.value != other.value\n\n    def __or__(self, other):\n        assert isinstance(other, _NodeState)\n        result = _NodeState(self.value)\n        result.value.update(other.value)\n        return result\n\n    def __add__(self, value):\n        result = _NodeState(self.value)\n        result.value.add(value)\n        return result\n\n    def __repr__(self):\n        return 'NodeState[%s]=%s' % (id(self), repr(self.value))",
    "docstring": "Abstraction for the state of the CFG walk for reaching definition analysis. This is a value type. Only implements the strictly necessary operators. Attributes: value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and their possible definitions",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\reaching_fndefs.py",
    "ast_data": "ClassDef name:_NodeState FunctionDef name:__init__ arg:self arg:init_from arguments arg arg If Assign Call Assign Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__ne__ arg:self arg:other arguments arg arg Return return:yes Compare FunctionDef name:__or__ arg:self arg:other arguments arg arg Call Assign Call Call Return return:yes FunctionDef name:__add__ arg:self arg:value arguments arg arg Assign Call Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "_from_local_no_grad",
    "source_code": "def _from_local_no_grad(local_tensor: torch.Tensor, sharding_spec: DTensorSpec) -> DTensor:\n    if not compiled_autograd_enabled():\n        return DTensor(local_tensor, sharding_spec, requires_grad=local_tensor.requires_grad)\n    else:\n        return DTensor.from_local(local_tensor, sharding_spec.mesh, sharding_spec.placements, shape=sharding_spec.shape, stride=sharding_spec.stride)",
    "docstring": "This method is similar to `` except that in eager mode it avoids some CPU overhead by avoiding default args and not being differentiable.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_common.py",
    "ast_data": "FunctionDef name:_from_local_no_grad arg:local_tensor arg:sharding_spec arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_prune_tree",
    "source_code": "def _prune_tree(self):\n    check_is_fitted(self)\n    if self.ccp_alpha == 0.0:\n        return\n    if is_classifier(self):\n        n_classes = np.atleast_1d(self.n_classes_)\n        pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_)\n    else:\n        pruned_tree = Tree(self.n_features_in_, np.array([1] * self.n_outputs_, dtype=np.intp), self.n_outputs_)\n    _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)\n    self.tree_ = pruned_tree",
    "docstring": "Prune tree using Minimal Cost-Complexity Pruning.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\tree\\_classes.py",
    "ast_data": "FunctionDef name:_prune_tree arg:self arguments arg Call If Compare Return return:no If Call Assign Call Assign Call Assign Call Call Call Assign"
  },
  {
    "library": "scipy",
    "name": "write_object",
    "source_code": "def write_object(self, arr):\n    self.write_header(matdims(arr, self.oned_as), mxOBJECT_CLASS)\n    self.write_element(np.array(arr.classname, dtype='S'), mdtype=miINT8)\n    self._write_items(arr)",
    "docstring": "Same as writing structs, except different mx class, and extra classname element after header",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:write_object arg:self arg:arr arguments arg arg Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@property\ndef element_spec(self):\n    raise NotImplementedError('DistributedDataset.element_spec must be implemented in descendants.')",
    "docstring": "The type specification of an element of this . Example usage: >>> global_batch_size = 16 >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size) >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) >>> dist_dataset.element_spec (PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)), PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None), TensorSpec(shape=(None, 1), dtype=tf.int32, name=None))) Returns: A nested structure of objects matching the structure of an element of this . This returned value is typically a object and specifies the of individual components.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Raise Call"
  },
  {
    "library": "kornia",
    "name": "normalize_keypoints",
    "source_code": "@custom_fwd(cast_inputs=torch.float32)\ndef normalize_keypoints(kpts: Tensor, size: Tensor) -> Tensor:\n    if isinstance(size, torch.Size):\n        size = torch.tensor(size)[None]\n    shift = size.float().to(kpts) / 2\n    scale = size.max(1).values.float().to(kpts) / 2\n    kpts = (kpts - shift[:, None]) / scale[:, None, None]\n    return kpts",
    "docstring": "Normalize tensor of keypoints.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\lightglue.py",
    "ast_data": "FunctionDef name:normalize_keypoints arg:kpts arg:size arguments arg arg If Call Assign Call Assign Call Call Assign Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "filled",
    "source_code": "def filled(self, fill_value=None):\n    return asarray(self).filled(fill_value)[()]",
    "docstring": "Return a copy with masked fields filled with a given value. Parameters ---------- fill_value : array_like, optional The value to use for invalid entries. Can be scalar or non-scalar. If latter is the case, the filled array should be broadcastable over input array. Default is None, in which case the attribute is used instead. Returns ------- filled_void A object See Also -------- MaskedArray.filled",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:filled arg:self arg:fill_value arguments arg arg Return return:yes Call Call"
  },
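The same filled API is available on ordinary masked arrays, which makes the scalar/record variant above easier to picture:

```python
import numpy as np

a = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
print(a.filled(-999.0))  # [   1. -999.    3.]
print(a.filled())        # falls back to the array's fill_value attribute
```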
  {
    "library": "tensorflow",
    "name": "_fspecial_gauss",
    "source_code": "def _fspecial_gauss(size, sigma):\n    size = ops.convert_to_tensor(size, dtypes.int32)\n    sigma = ops.convert_to_tensor(sigma)\n    coords = math_ops.cast(math_ops.range(size), sigma.dtype)\n    coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0\n    g = math_ops.square(coords)\n    g *= -0.5 / math_ops.square(sigma)\n    g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1])\n    g = array_ops.reshape(g, shape=[1, -1])\n    g = nn_ops.softmax(g)\n    return array_ops.reshape(g, shape=[size, size, 1, 1])",
    "docstring": "Function to mimic the 'fspecial' gaussian MATLAB function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_fspecial_gauss arg:size arg:sigma arguments arg arg Assign Call Assign Call Assign Call Call Call Assign Call Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
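A NumPy restatement of the same construction (an outer sum of 1-D squared-distance exponents, normalized via softmax) can be used to sanity-check the TF version; this mirrors the code above and is not a public API:

```python
import numpy as np

def fspecial_gauss(size, sigma):
    coords = np.arange(size, dtype=np.float64) - (size - 1) / 2.0
    g = -0.5 * np.square(coords) / np.square(sigma)
    g = g.reshape(1, -1) + g.reshape(-1, 1)  # outer sum of 1-D exponents
    g = np.exp(g - g.max())
    g /= g.sum()  # softmax over the flattened kernel == normalized Gaussian
    return g.reshape(size, size, 1, 1)

kernel = fspecial_gauss(5, 1.5)
print(kernel.sum())  # ~1.0, as expected for a normalized kernel
```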
  {
    "library": "pytorch",
    "name": "run_jit_forward",
    "source_code": "def run_jit_forward(self, num_runs, print_per_iter=False, cuda_sync=False):\n    if self._jit_forward_graph is None:\n        self._jit_forward_graph = self._generate_jit_forward_graph()\n    self._jit_forward_graph(num_runs)",
    "docstring": "Run the forward path of an op with JIT mode",
    "type": "method",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_pytorch.py",
    "ast_data": "FunctionDef name:run_jit_forward arg:self arg:num_runs arg:print_per_iter arg:cuda_sync arguments arg arg arg arg If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "For",
    "source_code": "def For(start, limit, delta, inputs, body, name=None, hostmem=None, rewrite_with_while=None):\n    if rewrite_with_while:\n        return _ForUsingWhile(start, limit, delta, inputs, body, name, hostmem)\n    if body.captured_inputs:\n        ret = gen_functional_ops._for(start, limit, delta, inputs + body.captured_inputs, _LoopBodyCaptureWrapper(body), name=name)\n        ret = ret[:-len(body.captured_inputs)]\n    else:\n        ret = gen_functional_ops._for(start, limit, delta, inputs, body, name=name)\n    if hostmem:\n        num_for_params = 3\n        input_attr = attr_value_pb2.AttrValue()\n        input_attr.list.i.extend([num_for_params + i for i in hostmem])\n        ret[0].op._set_attr('_input_hostmem', input_attr)\n        output_attr = attr_value_pb2.AttrValue()\n        output_attr.list.i.extend(hostmem)\n        ret[0].op._set_attr('_output_hostmem', output_attr)\n    return ret",
    "docstring": "out = input; for i in range(start, limit, delta) out = body(i, out). Args: start: A of type . limit: A of type . delta: A of type . inputs: A list of objects. A list of input tensors whose types are T. body: A function takes a list of tensors and returns another list of tensors. Both lists have the same types as (int32, T...). name: A name for the operation (optional). hostmem: A list of integer. If i is in the list, inputs[i] is a host memory tensor. In other words, (i+1)-th argument of the body function is expecting a host memory. rewrite_with_while: If True, using While op to implement the For. Returns: A list of objects. Has the same type as . A list of output tensors whose types are T.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py",
    "ast_data": "FunctionDef name:For arg:start arg:limit arg:delta arg:inputs arg:body arg:name arg:hostmem arg:rewrite_with_while arguments arg arg arg arg arg arg arg arg If Return return:yes Call If Assign Call Call Assign Call Assign Call If Assign Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "apply_non_transform_mask",
    "source_code": "def apply_non_transform_mask(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor]=None) -> Tensor:\n    raise NotImplementedError",
    "docstring": "Process masks corresponding to the inputs that are no transformation applied.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\base.py",
    "ast_data": "FunctionDef name:apply_non_transform_mask arg:self arg:input arg:params arg:flags arg:transform arguments arg arg arg arg arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "_check_field_annotations",
    "source_code": "def _check_field_annotations(cls):\n    annotations = getattr(cls, '__annotations__', {})\n    for name, value in cls.__dict__.items():\n        if name == 'Spec':\n            if not isinstance(value, type):\n                raise ValueError(f'{cls.__qualname__}.Spec must be a nested class; got {value}.')\n            if value.__bases__ != (type_spec.TypeSpec,) and value.__bases__ != (object,):\n                raise ValueError(f'{cls.__qualname__}.Spec must be directly subclassed from tf.TypeSpec.')\n        elif extension_type_field.ExtensionTypeField.is_reserved_name(name):\n            raise ValueError(f\"The field annotations for {cls.__name__} are invalid. Field '{name}' is reserved.\")\n    for name in annotations:\n        if extension_type_field.ExtensionTypeField.is_reserved_name(name):\n            raise ValueError(f\"The field annotations for {cls.__name__} are invalid. Field '{name}' is reserved.\")\n    for key, value in cls.__dict__.items():\n        if not (key in annotations or callable(value) or key.startswith('_abc_') or (key == '_tf_extension_type_fields') or (key.startswith('__') and key.endswith('__')) or isinstance(value, (property, classmethod, staticmethod))):\n            raise ValueError(f'The field annotations for {cls.__name__} are invalid. Field {key} is missing a type annotation.')",
    "docstring": "Validates the field annotations for tf.ExtensionType subclass .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py",
    "ast_data": "FunctionDef name:_check_field_annotations arg:cls arguments arg Assign Call For Call If Compare If Call Raise Call If BoolOp Compare Compare Raise Call If Call Raise Call For If Call Raise Call For Call If BoolOp Compare Call Call Compare BoolOp Call Call Call Raise Call"
  },
  {
    "library": "scipy",
    "name": "beip_zeros",
    "source_code": "def beip_zeros(nt):\n    if not isscalar(nt) or floor(nt) != nt or nt <= 0:\n        raise ValueError('nt must be positive integer scalar.')\n    return _specfun.klvnzo(nt, 6)",
    "docstring": "Compute nt zeros of the derivative of the Kelvin function bei. Parameters ---------- nt : int Number of zeros to compute. Must be positive. Returns ------- ndarray First zeros of the derivative of the Kelvin function. See Also -------- bei, beip References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:beip_zeros arg:nt arguments arg If BoolOp Call Compare Call Compare Raise Call Return return:yes Call"
  },
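beip_zeros is exported from scipy.special, so a minimal usage sketch is just:

```python
from scipy.special import beip_zeros

# First three zeros of bei'(x); non-positive or non-integer nt raises
# ValueError, per the validation in the entry above.
zeros = beip_zeros(3)
print(zeros.shape)  # (3,)
```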
  {
    "library": "cherrypy",
    "name": "do_check",
    "source_code": "def do_check(self):\n    sess = cherrypy.session\n    request = cherrypy.serving.request\n    response = cherrypy.serving.response\n    username = sess.get(self.session_key)\n    if not username:\n        sess[self.session_key] = username = self.anonymous()\n        self._debug_message('No session[username], trying anonymous')\n    if not username:\n        url = cherrypy.url(qs=request.query_string)\n        self._debug_message('No username, routing to login_screen with from_page %(url)r', locals())\n        response.body = self.login_screen(url)\n        if 'Content-Length' in response.headers:\n            del response.headers['Content-Length']\n        return True\n    self._debug_message('Setting request.login to %(username)r', locals())\n    request.login = username\n    self.on_check(username)",
    "docstring": "Assert username. Raise redirect, or return True if request handled.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\cptools.py",
    "ast_data": "FunctionDef name:do_check arg:self arguments arg Assign Assign Assign Assign Call If Assign Call Call If Assign Call Call Call Assign Call If Compare Return return:yes Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "sigmoid",
    "source_code": "@dispatch.add_dispatch_support\ndef sigmoid(x):\n    output = nn.sigmoid(x)\n    output._keras_logits = x\n    return output",
    "docstring": "Sigmoid activation function, . Applies the sigmoid activation function. For small values (5) the result of the function gets close to 1. Sigmoid is equivalent to a 2-element Softmax, where the second element is assumed to be zero. The sigmoid function always returns a value between 0 and 1. For example: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.sigmoid(a) >>> b.numpy() array([2.0611537e-09, 2.6894143e-01, 5.0000000e-01, 7.3105860e-01, 1.0000000e+00], dtype=float32) Args: x: Input tensor. Returns: Tensor with the sigmoid activation: .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:sigmoid arg:x arguments arg Assign Call Assign Return return:yes"
  },
  {
    "library": "authlib",
    "name": "get_default_redirect_uri",
    "source_code": "def get_default_redirect_uri(self):\n    raise NotImplementedError()",
    "docstring": "A method to get client default redirect_uri. For instance, the database table for client has a column called ``:: def get_default_redirect_uri(self): return self.default_redirect_uri :return: A URL string",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_default_redirect_uri arg:self arguments arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "_delegate",
    "source_code": "def _delegate(xp: ModuleType, *backends: Backend) -> bool:\n    return any((backend.is_namespace(xp) for backend in backends))",
    "docstring": "Check whether is one of the to delegate to. Parameters ---------- xp : array_namespace Array namespace to check. *backends : IsNamespace Arbitrarily many backends (from the `xpbackends` otherwise.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_delegation.py",
    "ast_data": "FunctionDef name:_delegate arg:xp arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "get_paginator",
    "source_code": "def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs):\n    return self.paginator_class(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs)",
    "docstring": "Return an instance of the paginator for this view.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\list.py",
    "ast_data": "FunctionDef name:get_paginator arg:self arg:queryset arg:per_page arg:orphans arg:allow_empty_first_page arguments arg arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "django",
    "name": "patch_response_headers",
    "source_code": "def patch_response_headers(response, cache_timeout=None):\n    if cache_timeout is None:\n        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS\n    if cache_timeout < 0:\n        cache_timeout = 0\n    if not response.has_header('Expires'):\n        response.headers['Expires'] = http_date(time.time() + cache_timeout)\n    patch_cache_control(response, max_age=cache_timeout)",
    "docstring": "Add HTTP caching headers to the given HttpResponse: Expires and Cache-Control. Each header is only added if it isn't already set. cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used by default.",
    "type": "function",
    "file_path": "django\\django\\utils\\cache.py",
    "ast_data": "FunctionDef name:patch_response_headers arg:response arg:cache_timeout arguments arg arg If Compare Assign If Compare Assign If Call Assign Call Call Call"
  },
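A hedged sketch of applying this helper in a view (the view body and the 300-second timeout are illustrative choices, not from the source):

```python
from django.http import HttpResponse
from django.utils.cache import patch_response_headers

def cached_view(request):  # hypothetical view, for illustration only
    response = HttpResponse("cacheable payload")
    # Adds Expires and Cache-Control: max-age=300 unless already present.
    patch_response_headers(response, cache_timeout=300)
    return response
```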
  {
    "library": "django",
    "name": "_get_m2m_attr",
    "source_code": "def _get_m2m_attr(self, related, attr):\n    cache_attr = '_m2m_%s_cache' % attr\n    if hasattr(self, cache_attr):\n        return getattr(self, cache_attr)\n    if self.remote_field.through_fields is not None:\n        link_field_name = self.remote_field.through_fields[0]\n    else:\n        link_field_name = None\n    for f in self.remote_field.through._meta.fields:\n        if f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name):\n            setattr(self, cache_attr, getattr(f, attr))\n            return getattr(self, cache_attr)",
    "docstring": "Function that can be curried to provide the source accessor or DB column name for the m2m table.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\fields\\related.py",
    "ast_data": "FunctionDef name:_get_m2m_attr arg:self arg:related arg:attr arguments arg arg arg Assign If Call Return return:yes Call If Compare Assign Assign For If BoolOp Compare BoolOp Compare Compare Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "extend_results",
    "source_code": "def extend_results(self, results):\n    for r in results:\n        if not isinstance(r, common.Measurement):\n            raise ValueError(f'Expected an instance of `Measurement`, got {type(r)} instead.')\n    self._results.extend(results)",
    "docstring": "Append results to already stored ones. All added results must be instances of ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\compare.py",
    "ast_data": "FunctionDef name:extend_results arg:self arg:results arguments arg arg For If Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "on_test_batch_begin",
    "source_code": "@doc_controls.for_subclass_implementers\n@generic_utils.default\ndef on_test_batch_begin(self, batch, logs=None):\n    pass",
    "docstring": "Called at the beginning of a batch in methods. Also called at the beginning of a validation batch in the methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of . Typically, the values of the 's metrics are returned. Example: .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_batch_begin arg:self arg:batch arg:logs arguments arg arg arg"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, *args, **kwds):\n    args, loc, scale = self._parse_args(*args, **kwds)\n    x, loc, scale = map(asarray, (x, loc, scale))\n    args = tuple(map(asarray, args))\n    dtyp = np.promote_types(x.dtype, np.float64)\n    x = np.asarray((x - loc) / scale, dtype=dtyp)\n    cond0 = self._argcheck(*args) & (scale > 0)\n    cond1 = self._support_mask(x, *args) & (scale > 0)\n    cond = cond0 & cond1\n    output = zeros(shape(cond), dtyp)\n    putmask(output, 1 - cond0 + np.isnan(x), self.badvalue)\n    if np.any(cond):\n        goodargs = argsreduce(cond, *(x,) + args + (scale,))\n        scale, goodargs = (goodargs[-1], goodargs[:-1])\n        place(output, cond, self._pdf(*goodargs) / scale)\n    if output.ndim == 0:\n        return output[()]\n    return output",
    "docstring": "Probability density function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- pdf : ndarray Probability density function evaluated at x",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arguments arg arg arg arg Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Compare Assign Call Compare Assign Assign Call Call Call Call If Call Assign Call Assign Call Call If Compare Return return:yes Return return:yes"
  },
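This generic pdf is what the public distribution objects dispatch through; a minimal usage sketch with a standard normal:

```python
from scipy.stats import norm

# loc and scale are broadcast against x, as in the generic pdf above.
print(norm.pdf(0.0))                  # ~0.3989, the standard normal peak
print(norm.pdf([0.0, 1.0], loc=1.0))  # evaluated with a shifted location
```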
  {
    "library": "pytorch",
    "name": "traverse_state_dict",
    "source_code": "def traverse_state_dict(state_dict: STATE_DICT_TYPE, visitor: Callable[[OBJ_PATH, STATE_DICT_ITEM], None], keep_traversing: Callable[[STATE_DICT_ITEM], bool]=_keep_visiting_tensors) -> None:\n\n    def _is_terminal(value: STATE_DICT_ITEM) -> bool:\n        values: Collection[STATE_DICT_ITEM]\n        if isinstance(value, Mapping):\n            return False\n        elif isinstance(value, list):\n            values = value\n        else:\n            return True\n        for entry in values:\n            if isinstance(entry, (Mapping, list)) and (not _is_terminal(entry)):\n                return False\n            if keep_traversing is not None and keep_traversing(entry):\n                return False\n        return True\n\n    def _traverse_obj(path: OBJ_PATH, value: STATE_DICT_ITEM) -> None:\n        if isinstance(value, Mapping):\n            for k, v in value.items():\n                _traverse_obj(path + (str(k),), v)\n        elif _is_terminal(value):\n            visitor(path, value)\n        elif isinstance(value, (list, tuple)):\n            for i, v in enumerate(value):\n                _traverse_obj(path + (i,), v)\n    for key, value in state_dict.items():\n        _traverse_obj((str(key),), value)",
    "docstring": "Invoke `` will only be applied to elements in a list or a tuple, if the container contains tensors or mappings.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\_traverse.py",
    "ast_data": "FunctionDef name:traverse_state_dict arg:state_dict arg:visitor arg:keep_traversing arguments arg arg arg FunctionDef name:_is_terminal arg:value arguments arg If Call Return return:yes If Call Assign Return return:yes For If BoolOp Call Call Return return:yes If BoolOp Compare Call Return return:yes Return return:yes FunctionDef name:_traverse_obj arg:path arg:value arguments arg arg If Call For Call Call Call If Call Call If Call For Call Call For Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "select",
    "source_code": "@_onnx_symbolic('aten::select')\n@symbolic_helper.quantized_args(True)\n@symbolic_helper.parse_args('v', 'i', 'v')\ndef select(g: jit_utils.GraphContext, self, dim, index):\n    index = symbolic_helper._maybe_get_scalar(index)\n    if not symbolic_helper._is_value(index) and index < 0:\n        if index == -1:\n            end_index = _constants.INT64_MAX\n        else:\n            end_index = index + 1\n        slice_node = symbolic_helper._slice_helper(g, self, axes=[dim], starts=[index], ends=[end_index])\n        return symbolic_helper._squeeze_helper(g, slice_node, [dim])\n    else:\n        return g.op('Gather', self, index, axis_i=dim)",
    "docstring": "Implement the select functionality for a pytorch tensor in ONNX. Selects elements from the input tensor along the specified dimension based on the tensor.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py",
    "ast_data": "FunctionDef name:select arg:g arg:self arg:dim arg:index arguments arg arg arg arg Assign Call If BoolOp Call Compare If Compare Assign Assign Assign Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_torchgen_root",
    "source_code": "def get_torchgen_root() -> Path:\n    return Path(__file__).parent.resolve()",
    "docstring": "If you're depending on torchgen out-of-tree, you can use the root to figure out the path to native_functions.yaml",
    "type": "function",
    "file_path": "pytorch\\torchgen\\gen.py",
    "ast_data": "FunctionDef name:get_torchgen_root arguments Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, value):\n    if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n        raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n    self._value = value",
    "docstring": "Constructor for . Args: value: a float giving the predicted values. Required. Raises: ValueError: if the value is not a with dtype tf.float32.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_output.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:value arguments arg arg If BoolOp Call Raise Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "__len__",
    "source_code": "def __len__(self):\n    return len(self.steps)",
    "docstring": "Returns the length of the Pipeline",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\pipeline.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Return return:yes Call"
  },
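A quick check of the behaviour with the public constructors; len counts the (name, estimator) steps:

```python
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), LogisticRegression())
print(len(pipe))  # 2 -- one per step, exactly as implemented above
```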
  {
    "library": "tensorflow",
    "name": "TrainOutput",
    "source_code": "class TrainOutput(_SupervisedOutput):\n\n    def _get_signature_def_fn(self):\n        return unexported_signature_utils.supervised_train_signature_def",
    "docstring": "Represents the output of a supervised training process. This class generates the appropriate signature def for exporting training output by type-checking and wrapping loss, predictions, and metrics values.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_output.py",
    "ast_data": "ClassDef name:TrainOutput FunctionDef name:_get_signature_def_fn arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "nbytes",
    "source_code": "@cache_readonly\ndef nbytes(self) -> int:\n    return self._nbytes(False)",
    "docstring": "return the number of bytes in the underlying data",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:nbytes arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "get_freq",
    "source_code": "def get_freq(self) -> str | None:\n    if not self.is_monotonic or not self.index._is_unique:\n        return None\n    delta = self.deltas[0]\n    ppd = periods_per_day(self._creso)\n    if delta and _is_multiple(delta, ppd):\n        return self._infer_daily_rule()\n    if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):\n        return 'bh'\n    if not self.is_unique_asi8:\n        return None\n    delta = self.deltas_asi8[0]\n    pph = ppd // 24\n    ppm = pph // 60\n    pps = ppm // 60\n    if _is_multiple(delta, pph):\n        return _maybe_add_count('h', delta / pph)\n    elif _is_multiple(delta, ppm):\n        return _maybe_add_count('min', delta / ppm)\n    elif _is_multiple(delta, pps):\n        return _maybe_add_count('s', delta / pps)\n    elif _is_multiple(delta, pps // 1000):\n        return _maybe_add_count('ms', delta / (pps // 1000))\n    elif _is_multiple(delta, pps // 1000000):\n        return _maybe_add_count('us', delta / (pps // 1000000))\n    else:\n        return _maybe_add_count('ns', delta)",
    "docstring": "Find the appropriate frequency string to describe the inferred frequency of self.i8values Returns ------- str or None",
    "type": "method",
    "file_path": "pandas\\pandas\\tseries\\frequencies.py",
    "ast_data": "FunctionDef name:get_freq arg:self arguments arg If BoolOp Return return:no Assign Assign Call If BoolOp Call Return return:yes Call If Compare Return return:yes If Return return:no Assign Assign Assign Assign If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "base_dtype",
    "source_code": "def base_dtype(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if hasattr(dtype, 'base_dtype'):\n        return dtype.base_dtype\n    return dtype",
    "docstring": "Returns a non-reference based on this .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py",
    "ast_data": "FunctionDef name:base_dtype arg:dtype arguments arg Assign Call If Call Return return:yes Return return:yes"
  },
  {
    "library": "django",
    "name": "id_for_label",
    "source_code": "def id_for_label(self, id_):\n    return id_",
    "docstring": "Return the HTML ID attribute of this Widget for use by a , given the ID of the field. Return an empty string if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:id_for_label arg:self arg:id_ arguments arg arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "stop",
    "source_code": "def stop(self) -> None:\n    logger.info('EtcdServer stop method called')\n    stop_etcd(self._etcd_proc, self._base_data_dir)",
    "docstring": "Stop the server and cleans up auto generated resources (e.g. data dir).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_hatchcolor",
    "source_code": "def get_hatchcolor(self):\n    if self._hatch_color == 'edge':\n        if self._edgecolor[3] == 0:\n            return colors.to_rgba(mpl.rcParams['patch.edgecolor'])\n        return self.get_edgecolor()\n    return self._hatch_color",
    "docstring": "Return the hatch color.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_hatchcolor arg:self arguments arg If Compare If Compare Return return:yes Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_linewidth",
    "source_code": "def get_linewidth(self):\n    return self._linewidth",
    "docstring": "Return the linewidth in points. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_linewidth arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_all_state_dict_hooks",
    "source_code": "def _register_all_state_dict_hooks(state: _FSDPState):\n    for hook_registration_fn_str, hook, hook_registration_fn_kwargs in (('register_state_dict_pre_hook', _pre_state_dict_hook, {}), ('_register_state_dict_hook', _post_state_dict_hook, {}), ('_register_load_state_dict_pre_hook', _pre_load_state_dict_hook, {'with_module': True}), ('register_load_state_dict_post_hook', _post_load_state_dict_hook, {})):\n        _register_state_dict_hooks_base(state, hook_registration_fn_str, hook, hook_registration_fn_kwargs)",
    "docstring": "Registers pre-save, post-save, pre-load, and post-load state dict hooks.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_state_dict_utils.py",
    "ast_data": "FunctionDef name:_register_all_state_dict_hooks arg:state arguments arg For Call"
  },
  {
    "library": "matplotlib",
    "name": "get_datalim",
    "source_code": "def get_datalim(self, transData):\n    datalim = transforms.Bbox.null()\n    datalim.update_from_data_xy((self.get_transform() - transData).transform(np.concatenate([self._bbox, [self._bbox.minpos]])))\n    return datalim",
    "docstring": "Calculate the data limits and return them as a .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_datalim arg:self arg:transData arguments arg arg Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "resolve_reference_detect_inventory",
    "source_code": "def resolve_reference_detect_inventory(env: BuildEnvironment, node: pending_xref, contnode: TextElement) -> nodes.reference | None:\n    resolve_self = env.config.intersphinx_resolve_self\n    res = resolve_reference_any_inventory(env, True, node, contnode)\n    if res is not None:\n        return res\n    target = node['reftarget']\n    if ':' not in target:\n        return None\n    inv_name, _, new_target = target.partition(':')\n    self_referential = bool(resolve_self) and resolve_self == inv_name\n    if self_referential:\n        node['reftarget'] = new_target\n        node['intersphinx_self_referential'] = True\n        return None\n    if not inventory_exists(env, inv_name):\n        return None\n    node['reftarget'] = new_target\n    res_inv = resolve_reference_in_inventory(env, inv_name, node, contnode)\n    node['reftarget'] = target\n    return res_inv",
    "docstring": "Attempt to resolve a missing reference via intersphinx references. Resolution is tried first with the target as is in any inventory. If this does not succeed, then the target is split by the first `` is a named inventory, then resolution is tried in that inventory with the new target.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py",
    "ast_data": "FunctionDef name:resolve_reference_detect_inventory arg:env arg:node arg:contnode arguments arg arg arg Assign Assign Call If Compare Return return:yes Assign If Compare Return return:no Assign Call Assign BoolOp Call Compare If Assign Assign Return return:no If Call Return return:no Assign Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "function_type",
    "source_code": "@property\n@abc.abstractmethod\ndef function_type(self) -> FunctionType:\n    pass",
    "docstring": "Returns a FunctionType describing this callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\types\\core.py",
    "ast_data": "FunctionDef name:function_type arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "device",
    "source_code": "@tf_export(v1=['device'])\ndef device(device_name_or_function) -> ContextManager[None]:\n    if context.executing_eagerly():\n        if callable(device_name_or_function):\n            raise RuntimeError('tf.device does not support functions when eager execution is enabled.')\n        return context.device(device_name_or_function)\n    elif executing_eagerly_outside_functions():\n\n        @tf_contextlib.contextmanager\n        def combined(device_name_or_function):\n            with get_default_graph().device(device_name_or_function):\n                if not callable(device_name_or_function):\n                    with context.device(device_name_or_function):\n                        yield\n                else:\n                    yield\n        return combined(device_name_or_function)\n    else:\n        return get_default_graph().device(device_name_or_function)",
    "docstring": "Wrapper for using the default graph. See for more details. Args: device_name_or_function: The device name or function to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If eager execution is enabled and a function is passed in.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:device arg:device_name_or_function arguments arg If Call If Call Raise Call Return return:yes Call If Call FunctionDef name:combined arg:device_name_or_function arguments arg With Call Call If Call With Call Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y=None):\n    self.fit_predict(X, y)\n    return self",
    "docstring": "Estimate model parameters with the EM algorithm. The method fits the model `` is ignored and a single initialization is performed upon the first call. Upon consecutive calls, training starts where it left off. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object The fitted mixture.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_explode",
    "source_code": "def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]:\n    values = self.copy()\n    counts = np.ones(shape=(len(self),), dtype=np.uint64)\n    return (values, counts)",
    "docstring": "Transform each element of list-like to a row. For arrays that do not contain list-like elements the default implementation of this method just returns a copy and an array of ones (unchanged index). Returns ------- ExtensionArray Array with the exploded values. np.ndarray[uint64] The original lengths of each list-like for determining the resulting index. See Also -------- Series.explode : The method on the `` object that this extension array method is meant to support. Examples -------- >>> import pyarrow as pa >>> a = pd.array( ... [[1, 2, 3], [4], [5, 6]], dtype=pd.ArrowDtype(pa.list_(pa.int64())) ... ) >>> a._explode() ( [1, 2, 3, 4, 5, 6] Length: 6, dtype: int64[pyarrow], array([3, 1, 2], dtype=int32))",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_explode arg:self arguments arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "_active_downloads",
    "source_code": "def _active_downloads(self, slot: str) -> int:\n    if slot not in self.downloader.slots:\n        return 0\n    return len(self.downloader.slots[slot].active)",
    "docstring": "Return a number of requests in a Downloader for a given slot",
    "type": "method",
    "file_path": "scrapy\\scrapy\\pqueues.py",
    "ast_data": "FunctionDef name:_active_downloads arg:self arg:slot arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self, shape, dtype=None, **kwargs):\n    _validate_kwargs(self.__class__.__name__, kwargs)\n    dtype = _get_dtype(dtype)\n    if not dtype.is_floating and (not dtype.is_integer):\n        raise ValueError('Expected float or integer dtype, got %s.' % dtype)\n    if _PARTITION_SHAPE in kwargs:\n        shape = kwargs[_PARTITION_SHAPE]\n    return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)",
    "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. If not specified, is used, which default to unless you configured it otherwise (via ). **kwargs: Additional keyword arguments.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Raise Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "from_dlpack",
    "source_code": "def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':\n    if hasattr(ext_tensor, '__dlpack__'):\n        device = ext_tensor.__dlpack_device__()\n        if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM):\n            stream = torch.cuda.current_stream(f'cuda:{device[1]}')\n            is_cuda = device[0] == DLDeviceType.kDLGPU\n            stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream\n            dlpack = ext_tensor.__dlpack__(stream=stream_ptr)\n        else:\n            dlpack = ext_tensor.__dlpack__()\n    else:\n        dlpack = ext_tensor\n    return _from_dlpack(dlpack)",
    "docstring": "from_dlpack(ext_tensor) -> Tensor Converts a tensor from an external library into a `` function or method. Examples:: >>> import torch.utils.dlpack >>> t = torch.arange(4) # Convert a tensor directly (supported in PyTorch >= 1.10) >>> t2 = torch.from_dlpack(t) >>> t2[:2] = -1 # show that memory is shared >>> t2 tensor([-1, -1, 2, 3]) >>> t tensor([-1, -1, 2, 3]) # The old-style DLPack usage, with an intermediate capsule object >>> capsule = torch.utils.dlpack.to_dlpack(t) >>> capsule >>> t3 = torch.from_dlpack(capsule) >>> t3 tensor([-1, -1, 2, 3]) >>> t3[0] = -9 # now we're sharing memory between 3 tensors >>> t3 tensor([-9, -1, 2, 3]) >>> t2 tensor([-9, -1, 2, 3]) >>> t tensor([-9, -1, 2, 3])",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\dlpack.py",
    "ast_data": "FunctionDef name:from_dlpack arg:ext_tensor arguments arg If Call Assign Call If Compare Assign Call Assign Compare Assign BoolOp Compare Assign Call Assign Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "monitored_timer",
    "source_code": "@tf_contextlib.contextmanager\ndef monitored_timer(metric_name, state_tracker=None):\n    if not enable_metrics:\n        yield\n    else:\n        if not _METRICS_MAPPING:\n            _init()\n        start_time = time.time()\n        start_state = state_tracker() if state_tracker else None\n        yield\n        duration_sec = time.time() - start_time\n        if state_tracker is None or state_tracker() != start_state:\n            metric = _METRICS_MAPPING[metric_name]\n            metric.get_cell().add(duration_sec)",
    "docstring": "Monitor the execution time and collect it into the specified metric.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\metric_utils.py",
    "ast_data": "FunctionDef name:monitored_timer arg:metric_name arg:state_tracker arguments arg arg If If Call Assign Call Assign Call Assign Call If BoolOp Compare Compare Call Assign Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_early_stopping_scorer",
    "source_code": "def _check_early_stopping_scorer(self, X_binned_small_train, y_small_train, sample_weight_small_train, X_binned_val, y_val, sample_weight_val, raw_predictions_small_train=None, raw_predictions_val=None):\n    if is_classifier(self):\n        y_small_train = self.classes_[y_small_train.astype(int)]\n    self.train_score_.append(self._score_with_raw_predictions(X_binned_small_train, y_small_train, sample_weight_small_train, raw_predictions_small_train))\n    if self._use_validation_data:\n        if is_classifier(self):\n            y_val = self.classes_[y_val.astype(int)]\n        self.validation_score_.append(self._score_with_raw_predictions(X_binned_val, y_val, sample_weight_val, raw_predictions_val))\n        return self._should_stop(self.validation_score_)\n    else:\n        return self._should_stop(self.train_score_)",
    "docstring": "Check if fitting should be early-stopped based on scorer. Scores are computed on validation data or on training data.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_check_early_stopping_scorer arg:self arg:X_binned_small_train arg:y_small_train arg:sample_weight_small_train arg:X_binned_val arg:y_val arg:sample_weight_val arg:raw_predictions_small_train arg:raw_predictions_val arguments arg arg arg arg arg arg arg arg arg If Call Assign Call Call Call If If Call Assign Call Call Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "SimpleLibraryRegistry",
    "source_code": "class SimpleLibraryRegistry:\n\n    def __init__(self):\n        self._data = {}\n\n    def find(self, qualname: str) -> 'SimpleOperatorEntry':\n        if qualname not in self._data:\n            self._data[qualname] = SimpleOperatorEntry(qualname)\n        return self._data[qualname]",
    "docstring": "Registry for the \"simple\" torch.library APIs The \"simple\" torch.library APIs are a higher-level API on top of the raw PyTorch DispatchKey registration APIs that includes: - fake impl Registrations for these APIs do not go into the PyTorch dispatcher's table because they may not directly involve a DispatchKey. For example, the fake impl is a Python function that gets invoked by FakeTensor. Instead, we manage them here. SimpleLibraryRegistry is a mapping from a fully qualified operator name (including the overload) to SimpleOperatorEntry.",
    "type": "class",
    "file_path": "pytorch\\torch\\_library\\simple_registry.py",
    "ast_data": "ClassDef name:SimpleLibraryRegistry FunctionDef name:__init__ arg:self arguments arg Assign FunctionDef name:find arg:self arg:qualname arguments arg arg If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_RepeatDataset",
    "source_code": "class _RepeatDataset(dataset_ops.UnaryUnchangedStructureDataset):\n\n    def __init__(self, input_dataset, count, name=None):\n        self._input_dataset = input_dataset\n        if count is None:\n            self._count = constant_op.constant(-1, dtype=dtypes.int64, name='count')\n        else:\n            self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name='count')\n        self._name = name\n        variant_tensor = gen_dataset_ops.repeat_dataset(input_dataset._variant_tensor, count=self._count, **self._common_args)\n        super().__init__(input_dataset, variant_tensor)",
    "docstring": "A that repeats its input several times.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\repeat_op.py",
    "ast_data": "ClassDef name:_RepeatDataset FunctionDef name:__init__ arg:self arg:input_dataset arg:count arg:name arguments arg arg arg arg Assign If Compare Assign Call Assign Call Assign Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_should_cache",
    "source_code": "def _should_cache():\n    if context.executing_eagerly():\n        return False\n    graph = ops.get_default_graph()\n    ctxt = graph._get_control_flow_context()\n    in_v1_while_loop = control_flow_util.GetContainingWhileContext(ctxt) is not None\n    in_v2_while_loop = control_flow_util_v2.in_while_loop_defun(graph)\n    return not in_v1_while_loop and (not in_v2_while_loop)",
    "docstring": "Returns True if a default caching device should be set, otherwise False.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_should_cache arguments If Call Return return:yes Assign Call Assign Call Assign Compare Call Assign Call Return return:yes BoolOp"
  },
  {
    "library": "kornia",
    "name": "ImageLoadType",
    "source_code": "class ImageLoadType(Enum):\n    UNCHANGED = 0\n    GRAY8 = 1\n    RGB8 = 2\n    RGBA8 = 3\n    GRAY32 = 4\n    RGB32 = 5",
    "docstring": "Enum to specify the desired image type.",
    "type": "class",
    "file_path": "kornia\\kornia\\io\\io.py",
    "ast_data": "ClassDef name:ImageLoadType Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "_check_optimize_result",
    "source_code": "def _check_optimize_result(solver, result, max_iter=None, extra_warning_msg=None):\n    if solver == 'lbfgs':\n        if max_iter is not None:\n            n_iter_i = min(result.nit, max_iter)\n        else:\n            n_iter_i = result.nit\n        if result.status != 0:\n            warning_msg = f'{solver} failed to converge after {n_iter_i} iteration(s) (status={result.status}):\\n{result.message}\\n'\n            if max_iter is not None and n_iter_i == max_iter:\n                warning_msg += f'\\nIncrease the number of iterations to improve the convergence (max_iter={max_iter}).'\n            warning_msg += '\\nYou might also want to scale the data as shown in:\\n    https://scikit-learn.org/stable/modules/preprocessing.html'\n            if extra_warning_msg is not None:\n                warning_msg += '\\n' + extra_warning_msg\n            warnings.warn(warning_msg, ConvergenceWarning, stacklevel=2)\n    else:\n        raise NotImplementedError\n    return n_iter_i",
    "docstring": "Check the OptimizeResult for successful convergence Parameters ---------- solver : str Solver name. Currently only is supported. result : OptimizeResult Result of the scipy.optimize.minimize function. max_iter : int, default=None Expected maximum number of iterations. extra_warning_msg : str, default=None Extra warning message. Returns ------- n_iter : int Number of iterations.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\optimize.py",
    "ast_data": "FunctionDef name:_check_optimize_result arg:solver arg:result arg:max_iter arg:extra_warning_msg arguments arg arg arg arg If Compare If Compare Assign Call Assign If Compare Assign If BoolOp Compare Compare If Compare Call Raise Return return:yes"
  },
  {
    "library": "scipy",
    "name": "gammainc",
    "source_code": "def gammainc(a, x, dps=50, maxterms=10 ** 8):\n    with mp.workdps(dps):\n        z, a, b = (mp.mpf(a), mp.mpf(x), mp.mpf(x))\n        G = [z]\n        negb = mp.fneg(b, exact=True)\n\n        def h(z):\n            T1 = ([mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1 + z], b)\n            return (T1,)\n        res = mp.hypercomb(h, [z], maxterms=maxterms)\n        return mpf2float(res)",
    "docstring": "Compute gammainc exactly like mpmath does but allow for more summands in hypercomb. See mpmath/functions/expintegrals.py#L134 in the mpmath GitHub repository.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_precompute\\gammainc_data.py",
    "ast_data": "FunctionDef name:gammainc arg:a arg:x arg:dps arg:maxterms arguments arg arg arg arg With Call Assign Call Call Call Assign Assign Call FunctionDef name:h arg:z arguments arg Assign Call Return return:yes Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_hip_file_path",
    "source_code": "def get_hip_file_path(rel_filepath, is_pytorch_extension=False):\n    assert not os.path.isabs(rel_filepath)\n    if not is_pytorch_extension and (not is_out_of_place(rel_filepath)):\n        return rel_filepath\n    dirpath, filename = os.path.split(rel_filepath)\n    root, ext = os.path.splitext(filename)\n    if ext == '.cu':\n        ext = '.hip'\n    orig_filename = filename\n    orig_dirpath = dirpath\n    dirpath = dirpath.replace('cuda', 'hip')\n    dirpath = dirpath.replace('CUDA', 'HIP')\n    dirpath = dirpath.replace('THC', 'THH')\n    root = root.replace('cuda', 'hip')\n    root = root.replace('CUDA', 'HIP')\n    if dirpath != 'caffe2/core':\n        root = root.replace('THC', 'THH')\n    if not is_pytorch_extension and dirpath == orig_dirpath:\n        dirpath = os.path.join(dirpath, 'hip')\n    if is_pytorch_extension and dirpath == orig_dirpath and (root + ext == orig_filename):\n        root = root + '_hip'\n    return os.path.join(dirpath, root + ext)",
    "docstring": "Returns the new name of the hipified file",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py",
    "ast_data": "FunctionDef name:get_hip_file_path arg:rel_filepath arg:is_pytorch_extension arguments arg arg Call If BoolOp Call Return return:yes Assign Call Assign Call If Compare Assign Assign Assign Assign Call Assign Call Assign Call Assign Call Assign Call If Compare Assign Call If BoolOp Compare Assign Call If BoolOp Compare Compare Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "interval_censored",
    "source_code": "@classmethod\ndef interval_censored(cls, low, high):\n    _validate_1d(low, 'low', allow_inf=True)\n    _validate_1d(high, 'high', allow_inf=True)\n    if len(low) != len(high):\n        raise ValueError('`low` and `high` must have the same length.')\n    interval = np.column_stack((low, high))\n    uncensored, left, right, interval = _validate_interval(interval)\n    return cls(uncensored=uncensored, left=left, right=right, interval=interval)",
    "docstring": "Create a instance of interval-censored data. This method is useful when all the data is interval-censored, and the low and high ends of the intervals are already stored in separate one-dimensional arrays. Parameters ---------- low : array_like The one-dimensional array containing the low ends of the intervals. high : array_like The one-dimensional array containing the high ends of the intervals. Returns ------- data : An instance of that represents the collection of censored values. Examples -------- >>> import numpy as np >>> from scipy.stats import CensoredData `` are the low and high ends of a collection of interval-censored values. >>> a = [0.5, 2.0, 3.0, 5.5] >>> b = [1.0, 2.5, 3.5, 7.0] >>> data = CensoredData.interval_censored(low=a, high=b) >>> print(data) CensoredData(4 values: 0 not censored, 4 interval-censored)",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_censored_data.py",
    "ast_data": "FunctionDef name:interval_censored arg:cls arg:low arg:high arguments arg arg arg Call Call If Compare Call Call Raise Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "block",
    "source_code": "@array_function_dispatch(_block_dispatcher)\ndef block(arrays):\n    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)\n    if list_ndim * final_size > 2 * 512 * 512:\n        return _block_slicing(arrays, list_ndim, result_ndim)\n    else:\n        return _block_concatenate(arrays, list_ndim, result_ndim)",
    "docstring": "Assemble an nd-array from nested lists of blocks. Blocks in the innermost lists are concatenated (see ) along the last dimension (-1), then these are concatenated along the second-last dimension (-2), and so on until the outermost list is reached. Blocks can be of any dimension, but will not be broadcasted using the normal rules. Instead, leading axes of size 1 are inserted, to make `blockblockhstackblockvstackatleast_1datleast_2d`: >>> a = np.array(0) >>> b = np.array([1]) >>> np.block([a]) # atleast_1d(a) array([0]) >>> np.block([b]) # atleast_1d(b) array([1]) >>> np.block([[a]]) # atleast_2d(a) array([[0]]) >>> np.block([[b]]) # atleast_2d(b) array([[1]])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\shape_base.py",
    "ast_data": "FunctionDef name:block arg:arrays arguments arg Assign Call If Compare Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "from_float",
    "source_code": "@classmethod\ndef from_float(cls, mod, use_precomputed_fake_quant=False):\n    assert type(mod) == nni.LinearBn1d, 'qat.' + cls.__name__ + '.from_float only works for ' + nni.LinearBn1d.__name__\n    assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n    assert mod.qconfig, 'Input float module must have a valid config'\n    qconfig = mod.qconfig\n    linear, bn = (mod[0], mod[1])\n    qat_linearbn = cls(linear.in_features, linear.out_features, linear.bias is not None, bn.eps, bn.momentum, False, qconfig)\n    qat_linearbn.weight = linear.weight\n    qat_linearbn.bias = linear.bias\n    qat_linearbn.bn.weight = bn.weight\n    qat_linearbn.bn.bias = bn.bias\n    qat_linearbn.bn.running_mean = bn.running_mean\n    qat_linearbn.bn.running_var = bn.running_var\n    qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked\n    return qat_linearbn",
    "docstring": "Create a qat module from a float module or qparams_dict Args: `mod' a float module, either produced by torch.ao.quantization utilities or directly from user",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\linear_fused.py",
    "ast_data": "FunctionDef name:from_float arg:cls arg:mod arg:use_precomputed_fake_quant arguments arg arg arg Compare Call Call Assign Assign Assign Call Compare Assign Assign Assign Assign Assign Assign Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_get_bbox_and_child_offsets",
    "source_code": "def _get_bbox_and_child_offsets(self, renderer):\n    raise NotImplementedError('get_bbox_and_offsets must be overridden in derived classes')",
    "docstring": "Return the bbox of the offsetbox and the child offsets. The bbox should satisfy `.RendererBase` subclass Returns ------- bbox list of (xoffset, yoffset) pairs",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:_get_bbox_and_child_offsets arg:self arg:renderer arguments arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "log_comm_debug_tracing_table_to_file",
    "source_code": "def log_comm_debug_tracing_table_to_file(self, file_name='comm_mode_log.txt', noise_level=3):\n    ansi_escape = re.compile('\\\\x1B\\\\[[0-?]*[ -/]*[@-~]')\n    table = ansi_escape.sub('', self.generate_comm_debug_tracing_table(noise_level))\n    with open(file_name, 'w') as log_file:\n        log_file.write(table)",
    "docstring": "Alternative to console CommDebugMode output, writes to file specified by the user",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py",
    "ast_data": "FunctionDef name:log_comm_debug_tracing_table_to_file arg:self arg:file_name arg:noise_level arguments arg arg arg Assign Call Assign Call Call With Call Call"
  },
  {
    "library": "matplotlib",
    "name": "contains",
    "source_code": "def contains(self, mouseevent, radius=None):\n    if self._different_canvas(mouseevent):\n        return (False, {})\n    radius = self._process_radius(radius)\n    codes = self.get_path().codes\n    if codes is not None:\n        vertices = self.get_path().vertices\n        idxs, = np.where(codes == Path.MOVETO)\n        idxs = idxs[1:]\n        subpaths = map(Path, np.split(vertices, idxs), np.split(codes, idxs))\n    else:\n        subpaths = [self.get_path()]\n    inside = any((subpath.contains_point((mouseevent.x, mouseevent.y), self.get_transform(), radius) for subpath in subpaths))\n    return (inside, {})",
    "docstring": "Test whether the mouse event occurred in the patch. Parameters ---------- mouseevent : Where the user clicked. radius : float, optional Additional margin on the patch in target coordinates of . See for further details. If , the default value depends on the state of the object: - If is a number, the default is that value. This is so that picking works as expected. - Otherwise if the edge color has a non-zero alpha, the default is half of the linewidth. This is so that all the colored pixels are \"in\" the patch. - Finally, if the edge has 0 alpha, the default is 0. This is so that patches without a stroked edge do not have points outside of the filled region report as \"in\" due to an invisible edge. Returns ------- (bool, empty dict)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:contains arg:self arg:mouseevent arg:radius arguments arg arg arg If Call Return return:yes Assign Call Assign Call If Compare Assign Call Assign Call Compare Assign Assign Call Call Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "InconvertibleTensorProto",
    "source_code": "class InconvertibleTensorProto:\n\n    def __init__(self, tensor_proto, initialized=True):\n        self._tensor_proto = tensor_proto\n        self._initialized = initialized\n\n    def __str__(self):\n        output = '' if self._initialized else 'Uninitialized tensor:\\n'\n        output += str(self._tensor_proto)\n        return output\n\n    @property\n    def initialized(self):\n        return self._initialized",
    "docstring": "Represents a TensorProto that cannot be converted to np.ndarray.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "ClassDef name:InconvertibleTensorProto FunctionDef name:__init__ arg:self arg:tensor_proto arg:initialized arguments arg arg arg Assign Assign FunctionDef name:__str__ arg:self arguments arg Assign Call Return return:yes FunctionDef name:initialized arg:self arguments arg Return return:yes"
  },
  {
    "library": "pygame",
    "name": "get",
    "source_code": "def get():\n    _ft_init_check()\n    return pygame.event.get()",
    "docstring": "get() -> list of Events get all events from the queue",
    "type": "function",
    "file_path": "pygame\\src_py\\fastevent.py",
    "ast_data": "FunctionDef name:get arguments Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "refine_triangulation",
    "source_code": "def refine_triangulation(self, return_tri_index=False, subdiv=3):\n    refi_triangulation = self._triangulation\n    ntri = refi_triangulation.triangles.shape[0]\n    ancestors = np.arange(ntri, dtype=np.int32)\n    for _ in range(subdiv):\n        refi_triangulation, ancestors = self._refine_triangulation_once(refi_triangulation, ancestors)\n    refi_npts = refi_triangulation.x.shape[0]\n    refi_triangles = refi_triangulation.triangles\n    if return_tri_index:\n        found_index = np.full(refi_npts, -1, dtype=np.int32)\n        tri_mask = self._triangulation.mask\n        if tri_mask is None:\n            found_index[refi_triangles] = np.repeat(ancestors, 3).reshape(-1, 3)\n        else:\n            ancestor_mask = tri_mask[ancestors]\n            found_index[refi_triangles[ancestor_mask, :]] = np.repeat(ancestors[ancestor_mask], 3).reshape(-1, 3)\n            found_index[refi_triangles[~ancestor_mask, :]] = np.repeat(ancestors[~ancestor_mask], 3).reshape(-1, 3)\n        return (refi_triangulation, found_index)\n    else:\n        return refi_triangulation",
    "docstring": "Compute a uniformly refined triangulation *refi_triangulation* of the encapsulated :attr:. This function refines the encapsulated triangulation by splitting each father triangle into 4 child sub-triangles built on the edges midside nodes, recursing *subdiv* times. In the end, each triangle is hence divided into `~matplotlib.tri.Triangulation` The refined triangulation. found_index : int array Index of the initial triangulation containing triangle, for each point of *refi_triangulation*. Returned only if *return_tri_index* is set to True.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trirefine.py",
    "ast_data": "FunctionDef name:refine_triangulation arg:self arg:return_tri_index arg:subdiv arguments arg arg arg Assign Assign Assign Call For Call Assign Call Assign Assign If Assign Call Assign If Compare Assign Call Call Assign Assign Call Call Assign Call Call Return return:yes Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_maybe_mask_results",
    "source_code": "def _maybe_mask_results(self, result: np.ndarray, fill_value=iNaT, convert=None) -> np.ndarray:\n    if self._hasna:\n        if convert:\n            result = result.astype(convert)\n        if fill_value is None:\n            fill_value = np.nan\n        np.putmask(result, self._isnan, fill_value)\n    return result",
    "docstring": "Parameters ---------- result : np.ndarray fill_value : object, default iNaT convert : str, dtype or None Returns ------- result : ndarray with values replace by the fill_value mask the result if needed, convert to the provided dtype if its not None This is an internal routine.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py",
    "ast_data": "FunctionDef name:_maybe_mask_results arg:self arg:result arg:fill_value arg:convert arguments arg arg arg arg If If Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "SessionTool",
    "source_code": "class SessionTool(Tool):\n\n    def __init__(self):\n        Tool.__init__(self, 'before_request_body', _sessions.init)\n\n    def _lock_session(self):\n        cherrypy.serving.session.acquire_lock()\n\n    def _setup(self):\n        hooks = cherrypy.serving.request.hooks\n        conf = self._merged_args()\n        p = conf.pop('priority', None)\n        if p is None:\n            p = getattr(self.callable, 'priority', self._priority)\n        hooks.attach(self._point, self.callable, priority=p, **conf)\n        locking = conf.pop('locking', 'implicit')\n        if locking == 'implicit':\n            hooks.attach('before_handler', self._lock_session)\n        elif locking == 'early':\n            hooks.attach('before_request_body', self._lock_session, priority=60)\n        else:\n            pass\n        hooks.attach('before_finalize', _sessions.save)\n        hooks.attach('on_end_request', _sessions.close)\n\n    def regenerate(self):\n        sess = cherrypy.serving.session\n        sess.regenerate()\n        relevant = ('path', 'path_header', 'name', 'timeout', 'domain', 'secure')\n        conf = dict(((k, v) for k, v in self._merged_args().items() if k in relevant))\n        _sessions.set_response_cookie(**conf)",
    "docstring": "Session Tool for CherryPy. sessions.locking When 'implicit' (the default), the session will be locked for you, just before running the page handler. When 'early', the session will be locked before reading the request body. This is off by default for safety reasons; for example, a large upload would block the session, denying an AJAX progress meter (_). When 'explicit' (or any other value), you need to call cherrypy.session.acquire_lock() yourself before using session data.",
    "type": "class",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "ClassDef name:SessionTool FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:_lock_session arg:self arguments arg Call FunctionDef name:_setup arg:self arguments arg Assign Assign Call Assign Call If Compare Assign Call Call Assign Call If Compare Call If Compare Call Call Call FunctionDef name:regenerate arg:self arguments arg Assign Call Assign Assign Call Call Call Compare Call"
  },
  {
    "library": "scipy",
    "name": "QuantileTestResult",
    "source_code": "@dataclass\nclass QuantileTestResult:\n    statistic: float\n    statistic_type: int\n    pvalue: float\n    _alternative: list[str] = field(repr=False)\n    _x: np.ndarray = field(repr=False)\n    _p: float = field(repr=False)\n\n    def confidence_interval(self, confidence_level=0.95):\n        alternative = self._alternative\n        p = self._p\n        x = np.sort(self._x)\n        n = len(x)\n        bd = stats.binom(n, p)\n        if confidence_level <= 0 or confidence_level >= 1:\n            message = '`confidence_level` must be a number between 0 and 1.'\n            raise ValueError(message)\n        low_index = np.nan\n        high_index = np.nan\n        if alternative == 'less':\n            p = 1 - confidence_level\n            low = -np.inf\n            high_index = int(bd.isf(p))\n            high = x[high_index] if high_index < n else np.nan\n        elif alternative == 'greater':\n            p = 1 - confidence_level\n            low_index = int(bd.ppf(p)) - 1\n            low = x[low_index] if low_index >= 0 else np.nan\n            high = np.inf\n        elif alternative == 'two-sided':\n            p = (1 - confidence_level) / 2\n            low_index = int(bd.ppf(p)) - 1\n            low = x[low_index] if low_index >= 0 else np.nan\n            high_index = int(bd.isf(p))\n            high = x[high_index] if high_index < n else np.nan\n        return ConfidenceInterval(low, high)",
    "docstring": "Result of . Attributes ---------- statistic: float The statistic used to calculate the p-value; either `` means there is evidence that it is significantly less than the hypothesized value. pvalue : float The p-value of the hypothesis test.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "ClassDef name:QuantileTestResult Call Call Call FunctionDef name:confidence_interval arg:self arg:confidence_level arguments arg arg Assign Assign Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Raise Call Assign Assign If Compare Assign Assign Assign Call Call Assign Compare If Compare Assign Assign Call Call Assign Compare Assign If Compare Assign Assign Call Call Assign Compare Assign Call Call Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "fqn",
    "source_code": "def fqn(obj: Any):\n    return f'{obj.__module__}.{obj.__qualname__}'",
    "docstring": "Returns the fully qualified name of the object.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\utils.py",
    "ast_data": "FunctionDef name:fqn arg:obj arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_register_namedtuple",
    "source_code": "def _register_namedtuple(cls: type[Any], *, serialized_type_name: str) -> None:\n    _private_register_pytree_node(cls, _namedtuple_flatten, _namedtuple_unflatten, serialized_type_name=serialized_type_name, to_dumpable_context=_namedtuple_serialize, from_dumpable_context=_namedtuple_deserialize, flatten_with_keys_fn=_namedtuple_flatten_with_keys)",
    "docstring": "Registers a namedtuple as a valid pytree node. By default namedtuples are valid pytree nodes, but they are not serializable. This API provides the argument which allows these namedtuples to be serialized. Args: cls: the dataclass type to register serialized_type_name: The serialized name for the dataclass. This is required if you want to serialize the pytree TreeSpec containing this namedtuple.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:_register_namedtuple arg:cls arguments arg arg Call"
  },
  {
    "library": "authlib",
    "name": "authenticate_refresh_token",
    "source_code": "def authenticate_refresh_token(self, refresh_token):\n    raise NotImplementedError()",
    "docstring": "Get token information with refresh_token string. Developers MUST implement this method in subclass:: def authenticate_refresh_token(self, refresh_token): token = Token.get(refresh_token=refresh_token) if token and not token.refresh_token_revoked: return token :param refresh_token: The refresh token issued to the client :return: token",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\refresh_token.py",
    "ast_data": "FunctionDef name:authenticate_refresh_token arg:self arg:refresh_token arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "AveragePooling1D",
    "source_code": "class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(AveragePooling1D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)",
    "docstring": "Average Pooling layer for 1D inputs. Args: pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . name: A string, the name of the layer.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\pooling.py",
    "ast_data": "ClassDef name:AveragePooling1D FunctionDef name:__init__ arg:self arg:pool_size arg:strides arg:padding arg:data_format arg:name arguments arg arg arg arg arg arg arg If Compare Raise Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_format_flat",
    "source_code": "@final\ndef _format_flat(self, *, include_name: bool, formatter: Callable | None=None) -> list[str_t]:\n    header = []\n    if include_name:\n        header.append(pprint_thing(self.name, escape_chars=('\\t', '\\r', '\\n')) if self.name is not None else '')\n    if formatter is not None:\n        return header + list(self.map(formatter))\n    return self._format_with_header(header=header, na_rep=self._default_na_rep)",
    "docstring": "Render a string representation of the Index.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_format_flat arg:self arguments arg arg arg Assign If Call Compare Call If Compare Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "make_np",
    "source_code": "def make_np(x: torch.Tensor) -> np.ndarray:\n    if isinstance(x, np.ndarray):\n        return x\n    if np.isscalar(x):\n        return np.array([x])\n    if isinstance(x, torch.Tensor):\n        return _prepare_pytorch(x)\n    raise NotImplementedError(f'Got {type(x)}, but numpy array or torch tensor are expected.')",
    "docstring": "Convert an object into numpy array. Args: x: An instance of torch tensor Returns: numpy.array: Numpy array",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\tensorboard\\_convert_np.py",
    "ast_data": "FunctionDef name:make_np arg:x arguments arg If Call Return return:yes If Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_input_dict",
    "source_code": "def _create_input_dict(function_graph, func_arg_placeholders, initial_value=None):\n    if initial_value is None:\n        input_dict = {}\n    else:\n        input_dict = dict(initial_value)\n    for op in function_graph.get_operations():\n        if _is_in_placeholders(op, func_arg_placeholders):\n            input_dict[op.name] = op.name\n        else:\n            op_def = _get_op_def(op)\n            attrs = _get_node_def(op).attr\n            o = 0\n            for arg_def in op_def.output_arg:\n                if arg_def.number_attr:\n                    num = attrs[arg_def.number_attr].i\n                elif arg_def.type_list_attr:\n                    num = len(attrs[arg_def.type_list_attr].list.type)\n                else:\n                    num = 1\n                for i in range(num):\n                    result = '%s:%s:%d' % (op.name, arg_def.name, i)\n                    input_dict[op.values()[o].name] = result\n                    if o == 0:\n                        input_dict[op.name] = result\n                    o += 1\n    return input_dict",
    "docstring": "Create a mapping from graph tensor names to function tensor names.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\graph_to_function_def.py",
    "ast_data": "FunctionDef name:_create_input_dict arg:function_graph arg:func_arg_placeholders arg:initial_value arguments arg arg arg If Compare Assign Assign Call For Call If Call Assign Assign Call Assign Call Assign For If Assign If Assign Call Assign For Call Assign Assign Call If Compare Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "create_rot_n",
    "source_code": "def create_rot_n(n) -> list[Instruction]:\n    if n <= 1:\n        return []\n    if sys.version_info >= (3, 11):\n        return [create_instruction('SWAP', arg=i) for i in range(n, 1, -1)]\n    if sys.version_info < (3, 10) and n >= 5:\n        raise AttributeError(f'rotate {n} not supported for Python < 3.10')\n    if n <= 4:\n        return [create_instruction('ROT_' + ['TWO', 'THREE', 'FOUR'][n - 2])]\n    return [create_instruction('ROT_N', arg=n)]",
    "docstring": "Returns a \"simple\" sequence of instructions that rotates TOS to the n-th position in the stack. For Python = 3.11, any rotation can be expressed as a simple sequence of swaps.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:create_rot_n arg:n arguments arg If Compare Return return:no If Compare Return return:yes Call Call If BoolOp Compare Compare Raise Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__rmod__",
    "source_code": "def __rmod__(self, other):\n    other = as_dimension(other)\n    return other % self",
    "docstring": "Returns modulo . Args: other: Another Dimension, or a value accepted by . Returns: A Dimension whose value is modulo .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__rmod__ arg:self arg:other arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "copy_plural_forms",
    "source_code": "def copy_plural_forms(self, msgs, locale):\n    django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))\n    if self.domain == 'djangojs':\n        domains = ('djangojs', 'django')\n    else:\n        domains = ('django',)\n    for domain in domains:\n        django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)\n        if os.path.exists(django_po):\n            with open(django_po, encoding='utf-8') as fp:\n                m = plural_forms_re.search(fp.read())\n            if m:\n                plural_form_line = m['value']\n                if self.verbosity > 1:\n                    self.stdout.write('copying plural forms: %s' % plural_form_line)\n                lines = []\n                found = False\n                for line in msgs.splitlines():\n                    if not found and (not line or plural_forms_re.search(line)):\n                        line = plural_form_line\n                        found = True\n                    lines.append(line)\n                msgs = '\\n'.join(lines)\n                break\n    return msgs",
    "docstring": "Copy plural forms header contents from a Django catalog of locale to the msgs string, inserting it at the right place. msgs should be the contents of a newly created .po file.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\commands\\makemessages.py",
    "ast_data": "FunctionDef name:copy_plural_forms arg:self arg:msgs arg:locale arguments arg arg arg Assign Call Call Call If Compare Assign Assign For Assign Call If Call With Call Assign Call Call If Assign If Compare Call Assign Assign For Call If BoolOp BoolOp Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_safe_mean",
    "source_code": "def _safe_mean(losses, num_present):\n    total_loss = math_ops.reduce_sum(losses)\n    return math_ops.div_no_nan(total_loss, num_present, name='value')",
    "docstring": "Computes a safe mean of the losses. Args: losses: whose elements contain individual loss measurements. num_present: The number of measurable elements in . Returns: A scalar representing the mean of . If is zero, then zero is returned.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py",
    "ast_data": "FunctionDef name:_safe_mean arg:losses arg:num_present arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "maybe_add_missing_fqns",
    "source_code": "def maybe_add_missing_fqns(results: NSResultsType) -> None:\n    model_name_with_fqns = None\n    for result_type_to_results in results.values():\n        for model_name_to_results in result_type_to_results.values():\n            for model_name, model_results in model_name_to_results.items():\n                if len(model_results) > 0:\n                    if model_results[0]['fqn'] is not None:\n                        model_name_with_fqns = model_name\n                        break\n            break\n        break\n    if model_name_with_fqns:\n        for result_type_to_results in results.values():\n            for model_name_to_results in result_type_to_results.values():\n                ref_model_results = model_name_to_results[model_name_with_fqns]\n                for model_name, model_results in model_name_to_results.items():\n                    if model_name == model_name_with_fqns:\n                        continue\n                    for i in range(len(model_results)):\n                        fqn = ref_model_results[i]['fqn']\n                        model_results[i]['fqn'] = fqn",
    "docstring": "If entries are filled in for one of the models in , copies them over to any models which do not have them filled out. A common use case benefitting from this is comparing a model prepared by quantization to a quantized model. In this case, the model prepared by quantization would have entries, and the quantized model would not.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py",
    "ast_data": "FunctionDef name:maybe_add_missing_fqns arg:results arguments arg Assign For Call For Call For Call If Compare Call If Compare Assign If For Call For Call Assign For Call If Compare For Call Call Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "_add_file",
    "source_code": "def _add_file(self, file_path: str):\n    *dirs, file = file_path.split('/')\n    dir = self._get_dir(dirs)\n    dir.children[file] = Directory(file, False)",
    "docstring": "Adds a file to a Directory. Args: file_path (str): Path of file to add. Last element is added as a file while other paths items are added as directories.",
    "type": "method",
    "file_path": "pytorch\\torch\\package\\file_structure_representation.py",
    "ast_data": "FunctionDef name:_add_file arg:self arg:file_path arguments arg arg Assign Call Assign Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_load_partition_graphs",
    "source_code": "def _load_partition_graphs(self, client_partition_graphs, validate):\n    self._debug_graphs = {}\n    self._node_devices = {}\n    partition_graphs_and_device_names = []\n    for device_name in self._device_names:\n        partition_graph = None\n        if device_name in self._dump_graph_file_paths:\n            partition_graph = _load_graph_def_from_event_file(self._dump_graph_file_paths[device_name])\n        else:\n            logging.warn('Failed to load partition graphs for device %s from disk. As a fallback, the client graphs will be used. This may cause mismatches in device names.' % device_name)\n            partition_graph = self._find_partition_graph(client_partition_graphs, device_name)\n        if partition_graph:\n            partition_graphs_and_device_names.append((partition_graph, device_name))\n    for partition_graph, maybe_device_name in partition_graphs_and_device_names:\n        debug_graph = debug_graphs.DebugGraph(partition_graph, device_name=maybe_device_name)\n        self._debug_graphs[debug_graph.device_name] = debug_graph\n        self._collect_node_devices(debug_graph)\n        if validate and debug_graph.device_name in self._dump_tensor_data:\n            self._validate_dump_with_graphs(debug_graph.device_name)",
    "docstring": "Load and process partition graphs. Load the graphs; parse the input and control input structure; obtain the device and op type of each node; remove the Copy and debug ops inserted by the debugger. The gathered information can be used to validate the tensor dumps. Args: client_partition_graphs: A repeated field of GraphDefs representing the partition graphs executed by the TensorFlow runtime, from the Python client. These partition graphs are used only if partition graphs cannot be loaded from the dump directory on the file system. validate: () Whether the dump files are to be validated against the partition graphs. Raises: ValueError: If the partition GraphDef of one or more devices fail to be loaded.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:_load_partition_graphs arg:self arg:client_partition_graphs arg:validate arguments arg arg arg Assign Assign Assign For Assign If Compare Assign Call Call Assign Call If Call For Assign Call Assign Call If BoolOp Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "transform_function",
    "source_code": "def transform_function(self, fn, user_context):\n    future_features = inspect_utils.getfutureimports(fn)\n    node, source = parser.parse_entity(fn, future_features=future_features)\n    logging.log(3, 'Source code of %s:\\n\\n%s\\n', fn, source)\n    origin_info.resolve_entity(node, source, fn)\n    namespace = inspect_utils.getnamespace(fn)\n    namer = naming.Namer(namespace)\n    new_name = namer.new_symbol(self.get_transformed_name(node), ())\n    entity_info = transformer.EntityInfo(name=new_name, source_code=source, source_file='<fragment>', future_features=future_features, namespace=namespace)\n    context = transformer.Context(entity_info, namer, user_context)\n    node = self._erase_arg_defaults(node)\n    result = self.transform_ast(node, context)\n    return (result, context)",
    "docstring": "Transforms a function. Subclasses may override this method. The return value is opaque. The method receives the original AST. The result is passed as-is to the output of . Args: fn: A function or lambda. user_context: An opaque object (may be None) that is forwarded to transform_ast, through the ctx.user attribute. Returns: Tuple[Any, Any]. By default it returns the output of transform_ast, together with a containing information about the transformation process.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py",
    "ast_data": "FunctionDef name:transform_function arg:self arg:fn arg:user_context arguments arg arg arg Assign Call Assign Call Call Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "get_data_home",
    "source_code": "def get_data_home(data_home=None):\n    if data_home is None:\n        data_home = os.environ.get('SEABORN_DATA', user_cache_dir('seaborn'))\n    data_home = os.path.expanduser(data_home)\n    if not os.path.exists(data_home):\n        os.makedirs(data_home)\n    return data_home",
    "docstring": "Return a path to the cache directory for example datasets. This directory is used by :func:. If the `SEABORN_DATA` environment variable (if it exists) or otherwise default to an OS-appropriate user cache location.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:get_data_home arg:data_home arguments arg If Compare Assign Call Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "NSTracer",
    "source_code": "class NSTracer(quantize_fx.QuantizationTracer):\n\n    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:\n        if isinstance(m, torch.ao.quantization.ObserverBase):\n            return True\n        elif isinstance(m, torch.ao.quantization.FakeQuantizeBase):\n            return True\n        return super().is_leaf_module(m, module_qualified_name)",
    "docstring": "Just like a regular FX quantization tracer, but treats observers and fake_quantize modules as leaf modules.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\ns\\_numeric_suite_fx.py",
    "ast_data": "ClassDef name:NSTracer FunctionDef name:is_leaf_module arg:self arg:m arg:module_qualified_name arguments arg arg arg If Call Return return:yes If Call Return return:yes Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, alpha):\n    alpha = _dirichlet_check_parameters(alpha)\n    x = _dirichlet_check_input(alpha, x)\n    out = np.exp(self._logpdf(x, alpha))\n    return _squeeze_output(out)",
    "docstring": "The Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray or scalar The probability density function evaluated at .",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:alpha arguments arg arg arg Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "extract_links",
    "source_code": "def extract_links(self, response: TextResponse) -> list[Link]:\n    base_url = get_base_url(response)\n    if self.restrict_xpaths:\n        docs = [subdoc for x in self.restrict_xpaths for subdoc in response.xpath(x)]\n    else:\n        docs = [response.selector]\n    all_links = []\n    for doc in docs:\n        links = self._extract_links(doc, response.url, response.encoding, base_url)\n        all_links.extend(self._process_links(links))\n    if self.link_extractor.unique:\n        return unique_list(all_links, key=self.link_extractor.link_key)\n    return all_links",
    "docstring": "Returns a list of :class: objects from the specified :class:. Only links that match the settings passed to the ``, otherwise they are returned.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\linkextractors\\lxmlhtml.py",
    "ast_data": "FunctionDef name:extract_links arg:self arg:response arguments arg arg Assign Call If Assign Call Assign Assign For Assign Call Call Call If Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, operator, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None):\n    parameters = dict(operator=operator, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n    self._operator = operator\n    combine_hint = linear_operator_util.use_operator_or_provided_hint_unless_contradicting\n    is_square = combine_hint(operator, 'is_square', is_square, 'An operator is square if and only if its adjoint is square.')\n    is_non_singular = combine_hint(operator, 'is_non_singular', is_non_singular, 'An operator is non-singular if and only if its adjoint is non-singular.')\n    is_self_adjoint = combine_hint(operator, 'is_self_adjoint', is_self_adjoint, 'An operator is self-adjoint if and only if its adjoint is self-adjoint.')\n    is_positive_definite = combine_hint(operator, 'is_positive_definite', is_positive_definite, 'An operator is positive-definite if and only if its adjoint is positive-definite.')\n    if name is None:\n        name = operator.name + '_adjoint'\n    with ops.name_scope(name):\n        super(LinearOperatorAdjoint, self).__init__(dtype=operator.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)",
    "docstring": "Initialize a . is initialized with an operator . The and methods effectively flip the argument. E.g. Args: operator: object. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form has positive real part for all nonzero . Note that we do not require the operator to be self-adjoint to be positive-definite. See: is_square: Expect that this operator acts like square [batch] matrices. name: A name for this . Default is . Raises: ValueError: If is False.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_adjoint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:operator arg:is_non_singular arg:is_self_adjoint arg:is_positive_definite arg:is_square arg:name arguments arg arg arg arg arg arg arg Assign Call Assign Assign Assign Call Assign Call Assign Call Assign Call If Compare Assign With Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_create_resource",
    "source_code": "def _create_resource(self):\n    raise NotImplementedError('TrackableResource._create_resource not implemented.')",
    "docstring": "A function that creates a resource handle.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\resource.py",
    "ast_data": "FunctionDef name:_create_resource arg:self arguments arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "libpaths",
    "source_code": "def libpaths(paths, bits):\n    if bits not in (32, 64):\n        raise ValueError('Invalid bit size in libpaths: 32 or 64 only')\n    if bits == 32:\n        return paths\n    out = []\n    for p in paths:\n        out.extend([p + '64', p])\n    return out",
    "docstring": "Return a list of library paths valid on 32 or 64 bit systems. Inputs: paths : sequence A sequence of strings (typically paths) bits : int An integer, the only valid values are 32 or 64. A ValueError exception is raised otherwise. Examples: Consider a list of directories >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] For a 32-bit platform, this is already valid: >>> np.distutils.system_info.libpaths(paths,32) ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] On 64 bits, we prepend the '64' postfix >>> np.distutils.system_info.libpaths(paths,64) ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', '/usr/lib64', '/usr/lib']",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "FunctionDef name:libpaths arg:paths arg:bits arguments arg arg If Compare Raise Call If Compare Return return:yes Assign For Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_variable_initialized",
    "source_code": "def is_variable_initialized(ref, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.is_variable_initialized(ref=ref, name=name)\n    return ref.is_initialized(name=name)",
    "docstring": "Checks whether a tensor has been initialized. Outputs boolean scalar indicating whether the tensor has been initialized. Args: ref: A mutable . Should be from a node. May be uninitialized. name: A name for the operation (optional). Returns: A of type .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py",
    "ast_data": "FunctionDef name:is_variable_initialized arg:ref arg:name arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "load_public",
    "source_code": "def load_public(self, data: memoryview) -> tuple[ec.EllipticCurvePublicKey, memoryview]:\n    (_, point), data = self.get_public(data)\n    public_key = ec.EllipticCurvePublicKey.from_encoded_point(self.curve, point.tobytes())\n    return (public_key, data)",
    "docstring": "Make ECDSA public key from data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "make_saveable_from_iterator",
    "source_code": "@tf_export('data.experimental.make_saveable_from_iterator')\n@deprecation.deprecated(None, '`make_saveable_from_iterator` is intended for use in TF1 with `tf.compat.v1.Saver`. In TF2, use `tf.train.Checkpoint` instead.')\ndef make_saveable_from_iterator(iterator, external_state_policy=None):\n    if external_state_policy is None:\n        external_state_policy = 'fail'\n    policy_enum = _convert_external_state_policy_to_enum(external_state_policy)\n    return iterator_ops._IteratorSaveable(iterator._iterator_resource, iterator._iterator_resource.name, external_state_policy=policy_enum)",
    "docstring": "Returns a SaveableObject for saving/restoring iterator state using Saver. Args: iterator: Iterator. external_state_policy: A string that identifies how to handle input pipelines that depend on external state. Possible values are 'ignore': The external state is silently ignored. 'warn': The external state is ignored, logging a warning. 'fail': The operation fails upon encountering external state. By default we set it to 'fail'. Returns: A SaveableObject for saving/restoring iterator state using Saver. Raises: ValueError: If iterator does not support checkpointing. ValueError: If is not one of 'warn', 'ignore' or 'fail'. For example: Note: When restoring the iterator, the existing iterator state is completely discarded. This means that any changes you may have made to the Dataset graph will be discarded as well! This includes the new Dataset graph that you may have built during validation. So, while running validation, make sure to run the initializer for the validation input pipeline after restoring the checkpoint. Note: Not all iterators support checkpointing yet. Attempting to save the state of an unsupported iterator will throw an error.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:make_saveable_from_iterator arg:iterator arg:external_state_policy arguments arg arg If Compare Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "release",
    "source_code": "def release(self) -> None:\n    if self._inference_session is not None:\n        self._inference_session = None\n    gc.collect()\n    if self._tempdir is not None:\n        self._tempdir.cleanup()\n        self._tempdir = None",
    "docstring": "Release the inference session. You may call this method to release the resources used by the inference session.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:release arg:self arguments arg If Compare Assign Call If Compare Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "node_from_map",
    "source_code": "def node_from_map(node_map: Mapping[str, node_def_pb2.NodeDef], name: str) -> node_def_pb2.NodeDef:\n    stripped_name = node_name_from_input(name)\n    if stripped_name not in node_map:\n        raise ValueError(\"No node named '%s' found in map.\" % name)\n    return node_map[stripped_name]",
    "docstring": "Pulls a node def from a dictionary for a given name. Args: node_map: Dictionary containing an entry indexed by name for every node. name: Identifies the node we want to find. Returns: NodeDef of the node with the given name. Raises: ValueError: If the node isn't present in the dictionary.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:node_from_map arg:node_map arg:name arguments arg arg Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "KFold",
    "source_code": "class KFold(_UnsupportedGroupCVMixin, _BaseKFold):\n\n    def __init__(self, n_splits=5, *, shuffle=False, random_state=None):\n        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)\n\n    def _iter_test_indices(self, X, y=None, groups=None):\n        n_samples = _num_samples(X)\n        indices = np.arange(n_samples)\n        if self.shuffle:\n            check_random_state(self.random_state).shuffle(indices)\n        n_splits = self.n_splits\n        fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)\n        fold_sizes[:n_samples % n_splits] += 1\n        current = 0\n        for fold_size in fold_sizes:\n            start, stop = (current, current + fold_size)\n            yield indices[start:stop]\n            current = stop",
    "docstring": "K-Fold cross-validator. Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default). Each fold is then used once as a validation while the k - 1 remaining folds form the training set. Read more in the :ref:. For visualisation of cross-validation behaviour and comparison between common scikit-learn split methods refer to :ref: Parameters ---------- n_splits : int, default=5 Number of folds. Must be at least 2. .. versionchanged:: 0.22 `shufflerandom_stateGlossary random_state` to an integer. See Also -------- StratifiedKFold : Takes class information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). GroupKFold : K-fold iterator variant with non-overlapping groups. RepeatedKFold : Repeats K-Fold n times.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "ClassDef name:KFold FunctionDef name:__init__ arg:self arg:n_splits arguments arg arg arg arg Call Call FunctionDef name:_iter_test_indices arg:self arg:X arg:y arg:groups arguments arg arg arg arg Assign Call Assign Call If Call Call Assign Assign Call Assign For Assign Assign"
  },
  {
    "library": "scipy",
    "name": "solve",
    "source_code": "def solve(self, trust_radius):\n    p_best = self.newton_point()\n    if scipy.linalg.norm(p_best) < trust_radius:\n        hits_boundary = False\n        return (p_best, hits_boundary)\n    p_u = self.cauchy_point()\n    p_u_norm = scipy.linalg.norm(p_u)\n    if p_u_norm >= trust_radius:\n        p_boundary = p_u * (trust_radius / p_u_norm)\n        hits_boundary = True\n        return (p_boundary, hits_boundary)\n    _, tb = self.get_boundaries_intersections(p_u, p_best - p_u, trust_radius)\n    p_boundary = p_u + tb * (p_best - p_u)\n    hits_boundary = True\n    return (p_boundary, hits_boundary)",
    "docstring": "Minimize a function using the dog-leg trust-region algorithm. This algorithm requires function values and first and second derivatives. It also performs a costly Hessian decomposition for most iterations, and the Hessian is required to be positive definite. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- The Hessian is required to be positive definite. References ---------- .. [1] Jorge Nocedal and Stephen Wright, Numerical Optimization, second edition, Springer-Verlag, 2006, page 73.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_dogleg.py",
    "ast_data": "FunctionDef name:solve arg:self arg:trust_radius arguments arg arg Assign Call If Compare Call Assign Return return:yes Assign Call Assign Call If Compare Assign Assign Return return:yes Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "desc_sig_keyword_type",
    "source_code": "class desc_sig_keyword_type(desc_sig_element, _sig_element=True):\n    classes = ['kt']",
    "docstring": "Node for a keyword which is a built-in type in a signature.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\addnodes.py",
    "ast_data": "ClassDef name:desc_sig_keyword_type Assign"
  },
  {
    "library": "django",
    "name": "has_add_permission",
    "source_code": "def has_add_permission(self, request):\n    opts = self.opts\n    codename = get_permission_codename('add', opts)\n    return request.user.has_perm('%s.%s' % (opts.app_label, codename))",
    "docstring": "Return True if the given request has permission to add an object. Can be overridden by the user in subclasses.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:has_add_permission arg:self arg:request arguments arg arg Assign Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "create_load_python_module",
    "source_code": "def create_load_python_module(self, mod) -> Instruction:\n    output = self.tx.output\n    global_scope = output.global_scope\n    name = re.sub('^.*[.]', '', mod.__name__)\n    if global_scope.get(name, None) is mod:\n        return self.create_load_global(name, add=True)\n    prefix = f'___module_{name}'\n    global_name = self.tx.output.install_global_by_id(prefix, mod)\n    return self.create_load_global(global_name, add=True)",
    "docstring": "Generate a LOAD_GLOBAL instruction to fetch a given python module.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\codegen.py",
    "ast_data": "FunctionDef name:create_load_python_module arg:self arg:mod arguments arg arg Assign Assign Assign Call If Compare Call Return return:yes Call Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_sf",
    "source_code": "def _sf(self, x, beta, m):\n\n    def rhs(x, beta, m):\n        M = m / beta / (m - 1) * np.exp(-beta ** 2 / 2) + _norm_pdf_C * _norm_cdf(beta)\n        return _norm_pdf_C * _norm_sf(x) / M\n\n    def lhs(x, beta, m):\n        return 1 - self._cdf(x, beta, m)\n    return xpx.apply_where(x > -beta, (x, beta, m), rhs, lhs)",
    "docstring": "Survival function of the crystalball distribution.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_continuous_distns.py",
    "ast_data": "FunctionDef name:_sf arg:self arg:x arg:beta arg:m arguments arg arg arg arg FunctionDef name:rhs arg:x arg:beta arg:m arguments arg arg arg Assign Call Call Return return:yes Call FunctionDef name:lhs arg:x arg:beta arg:m arguments arg arg arg Return return:yes Call Return return:yes Call Compare"
  },
  {
    "library": "pytorch",
    "name": "ConvBn1d",
    "source_code": "class ConvBn1d(_ConvBnNd, nn.Conv1d):\n    _FLOAT_BN_MODULE: ClassVar[type[nn.BatchNorm1d]] = nn.BatchNorm1d\n    _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None\n    _FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvBn1d\n    _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d\n\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):\n        kernel_size = _single(kernel_size)\n        stride = _single(stride)\n        padding = _single(padding)\n        dilation = _single(dilation)\n        _ConvBnNd.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, False, _single(0), groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig, dim=1)",
    "docstring": "A ConvBn1d module is a module fused from Conv1d and BatchNorm1d, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class: and :class:. Similar to :class:, with FakeQuantize modules initialized to default. Attributes: freeze_bn: weight_fake_quant: fake quant module for weight",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py",
    "ast_data": "ClassDef name:ConvBn1d FunctionDef name:__init__ arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:eps arg:momentum arg:freeze_bn arg:qconfig arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "LambdaCallback",
    "source_code": "class LambdaCallback(Callback):\n\n    def __init__(self, on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None, **kwargs):\n        super(LambdaCallback, self).__init__()\n        self.__dict__.update(kwargs)\n        if on_epoch_begin is not None:\n            self.on_epoch_begin = on_epoch_begin\n        else:\n            self.on_epoch_begin = lambda epoch, logs: None\n        if on_epoch_end is not None:\n            self.on_epoch_end = on_epoch_end\n        else:\n            self.on_epoch_end = lambda epoch, logs: None\n        if on_batch_begin is not None:\n            self.on_batch_begin = on_batch_begin\n        else:\n            self.on_batch_begin = lambda batch, logs: None\n        if on_batch_end is not None:\n            self.on_batch_end = on_batch_end\n        else:\n            self.on_batch_end = lambda batch, logs: None\n        if on_train_begin is not None:\n            self.on_train_begin = on_train_begin\n        else:\n            self.on_train_begin = lambda logs: None\n        if on_train_end is not None:\n            self.on_train_end = on_train_end\n        else:\n            self.on_train_end = lambda logs: None",
    "docstring": "Callback for creating simple, custom callbacks on-the-fly. This callback is constructed with anonymous functions that will be called at the appropriate time (during ). Note that the callbacks expects positional arguments, as: - and expect two positional arguments: , - and expect two positional arguments: , - and expect one positional argument: Args: on_epoch_begin: called at the beginning of every epoch. on_epoch_end: called at the end of every epoch. on_batch_begin: called at the beginning of every batch. on_batch_end: called at the end of every batch. on_train_begin: called at the beginning of model training. on_train_end: called at the end of model training. Example:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "ClassDef name:LambdaCallback FunctionDef name:__init__ arg:self arg:on_epoch_begin arg:on_epoch_end arg:on_batch_begin arg:on_batch_end arg:on_train_begin arg:on_train_end arguments arg arg arg arg arg arg arg arg Call Call Call If Compare Assign Assign arguments arg arg If Compare Assign Assign arguments arg arg If Compare Assign Assign arguments arg arg If Compare Assign Assign arguments arg arg If Compare Assign Assign arguments arg If Compare Assign Assign arguments arg"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, image_size: ImageSize, params: Tensor) -> None:\n    if params.shape[-1] != 4 or len(params.shape) > 2:\n        raise ValueError('params must be of shape (B, 4) for PINHOLE Camera')\n    super().__init__(AffineTransform(), Z1Projection(), image_size, params)",
    "docstring": "Construct PinholeModel class. Args: image_size: Image size params: Camera parameters of shape :math: of the form :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\sensors\\camera\\camera_model.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:image_size arg:params arguments arg arg arg If BoolOp Compare Compare Call Raise Call Call Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "axes_dict",
    "source_code": "@property\ndef axes_dict(self):\n    return self._axes_dict",
    "docstring": "A mapping of facet names to corresponding :class:. If only one of `` tuple.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\axisgrid.py",
    "ast_data": "FunctionDef name:axes_dict arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "run_node",
    "source_code": "def run_node(self, node, fx_graph_module: torch.fx.GraphModule, onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher, onnxscript_graph: onnxscript_graph_building.TorchScriptGraph, onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator, fx_name_to_onnxscript_value: dict[str, onnxscript_graph_building.TorchScriptTensor | tuple[onnxscript_graph_building.TorchScriptTensor, ...]]):\n    if node.op == 'placeholder':\n        self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)\n    elif node.op == 'get_attr':\n        self.get_attr(node, onnxscript_graph, fx_name_to_onnxscript_value, fx_graph_module)\n    elif node.op == 'call_function':\n        self.call_function(node, onnxscript_tracer, fx_name_to_onnxscript_value, onnxfunction_dispatcher, fx_graph_module)\n    elif node.op == 'call_method':\n        self.call_method(node)\n    elif node.op == 'call_module':\n        self.call_module(node, onnxscript_graph, fx_name_to_onnxscript_value, onnxscript_tracer, fx_graph_module, onnxfunction_dispatcher)\n    elif node.op == 'output':\n        self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)\n    else:\n        raise RuntimeError(f'Found node type not defined in torch.fx: {node.op}')",
    "docstring": "Execute a single FX node to produce its ONNX counterpart. Args: node: The FX node to be translated. fx_graph_module: The FX graph module containing the node. onnxfunction_dispatcher: The dispatcher to find the best matched ONNX op. onnxscript_graph: The ONNX graph to be populated. onnxscript_tracer: The tracer to trace the ONNX graph. fx_name_to_onnxscript_value: The mapping from FX node name to ONNX Script value. Raises: RuntimeError: When a node.op is not supported.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\fx_onnx_interpreter.py",
    "ast_data": "FunctionDef name:run_node arg:self arg:node arg:fx_graph_module arg:onnxfunction_dispatcher arg:onnxscript_graph arg:onnxscript_tracer arg:fx_name_to_onnxscript_value arguments arg arg arg arg arg arg arg If Compare Call If Compare Call If Compare Call If Compare Call If Compare Call If Compare Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "set_metadata",
    "source_code": "def set_metadata(self, **kwargs):\n    if self._traceme and kwargs:\n        self._traceme.SetMetadata(**kwargs)",
    "docstring": "Sets metadata in this trace event. Args: **kwargs: metadata in key-value pairs. This method enables setting metadata in a trace event after it is created. Example usage: In this example, we want to trace how much time spent on calling a function, which includes compilation and execution. The compilation can be either getting a cached copy of the binary or actually generating the binary, which is indicated by the boolean \"in_cache\" returned by jit_compile(). We need to use set_metadata() to pass in_cache because we did not know the in_cache value when the trace was created (and we cannot create the trace after jit_compile(), because we want to measure the entire duration of call()).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\trace.py",
    "ast_data": "FunctionDef name:set_metadata arg:self arguments arg arg If BoolOp Call"
  },
  {
    "library": "sphinx",
    "name": "add_directive",
    "source_code": "def add_directive(self, name: str, cls: type[Directive], override: bool=False) -> None:\n    logger.debug('[app] adding directive: %r', (name, cls))\n    if not override and docutils.is_directive_registered(name):\n        logger.warning(__('directive %r is already registered and will not be overridden'), name, type='app', subtype='add_directive')\n    docutils.register_directive(name, cls)",
    "docstring": "Register a Docutils directive. :param name: The name of the directive :param cls: A directive class :param override: If false, do not install it if another directive is already installed as the same name If true, unconditionally install the directive. For example, a custom directive named `the Docutils docs `__ . .. versionchanged:: 0.6 Docutils 0.5-style directive classes are now supported. .. deprecated:: 1.8 Docutils 0.4-style (function based) directives support is deprecated. .. versionchanged:: 1.8 Add *override* keyword.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\application.py",
    "ast_data": "FunctionDef name:add_directive arg:self arg:name arg:cls arg:override arguments arg arg arg arg Call If BoolOp Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "name",
    "source_code": "def name(self):\n    return self._tpu",
    "docstring": "Return the name of the tpu, or the ip address if name is not provided.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py",
    "ast_data": "FunctionDef name:name arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "__cmp__",
    "source_code": "def __cmp__(self, other):\n    diff = builtins.cmp(self.qvalue, other.qvalue)\n    if diff == 0:\n        diff = builtins.cmp(str(self), str(other))\n    return diff",
    "docstring": "Compare current header to another by qvalues then strings.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__cmp__ arg:self arg:other arguments arg arg Assign Call If Compare Assign Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "savehist",
    "source_code": "def savehist(maxhist, x, xhist, f, fhist, cstrv, chist, constr, conhist):\n    if len(xhist) < maxhist:\n        xhist.append(x)\n        fhist.append(f)\n        chist.append(cstrv)\n        conhist.append(constr)\n    else:\n        xhist.pop(0)\n        fhist.pop(0)\n        chist.pop(0)\n        conhist.pop(0)\n        xhist.append(x)\n        fhist.append(f)\n        chist.append(cstrv)\n        conhist.append(constr)",
    "docstring": "Save the data values to the history lists. The implementation of this function is vastly different from the Fortran implementation. This is mostly due to the ease of creating and appending to lists in Python However just like the Fortran version we should be concerned about both performance and memory constraints. It will probably be better to initialize an array of NaN for each of the histories and keep track of how many indices we have stored. Not needed for the moment.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\history.py",
    "ast_data": "FunctionDef name:savehist arg:maxhist arg:x arg:xhist arg:f arg:fhist arg:cstrv arg:chist arg:constr arg:conhist arguments arg arg arg arg arg arg arg arg arg If Compare Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_paths_to_3d_segments",
    "source_code": "def _paths_to_3d_segments(paths, zs=0, zdir='z'):\n    if not np.iterable(zs):\n        zs = np.broadcast_to(zs, len(paths))\n    elif len(zs) != len(paths):\n        raise ValueError('Number of z-coordinates does not match paths.')\n    segs = [_path_to_3d_segment(path, pathz, zdir) for path, pathz in zip(paths, zs)]\n    return segs",
    "docstring": "Convert paths from a collection object to 3D segments.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py",
    "ast_data": "FunctionDef name:_paths_to_3d_segments arg:paths arg:zs arg:zdir arguments arg arg arg If Call Assign Call Call If Compare Call Call Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "hls_to_rgb",
    "source_code": "def hls_to_rgb(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype)\n    im: Tensor = image.unsqueeze(-4)\n    h_ch: Tensor = im[..., 0, :, :]\n    l_ch: Tensor = im[..., 1, :, :]\n    s_ch: Tensor = im[..., 2, :, :]\n    h_ch = h_ch * (6 / math.pi)\n    a = s_ch * torch.min(l_ch, 1.0 - l_ch)\n    k: Tensor = (h_ch + _HLS2RGB) % 12\n    mink = torch.min(k - 3.0, 9.0 - k)\n    return torch.addcmul(l_ch, a, mink.clamp_(min=-1.0, max=1.0), value=-1)",
    "docstring": "Convert a HLS image to RGB. The image data is assumed to be in the range of (0, 1). Args: image: HLS image to be converted to RGB with shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = hls_to_rgb(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\hls.py",
    "ast_data": "FunctionDef name:hls_to_rgb arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Assign Call Call Assign Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_str_bbox",
    "source_code": "def get_str_bbox(self, s):\n    return self.get_str_bbox_and_descent(s)[:4]",
    "docstring": "Return the string bounding box.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_afm.py",
    "ast_data": "FunctionDef name:get_str_bbox arg:self arg:s arguments arg arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_headers",
    "source_code": "def get_headers(self):\n    return [('Content-Type', 'application/x-www-form-urlencoded'), ('Cache-Control', 'no-store'), ('Pragma', 'no-cache')]",
    "docstring": "Get a list of headers.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\rfc5849\\errors.py",
    "ast_data": "FunctionDef name:get_headers arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "add_label_near",
    "source_code": "def add_label_near(self, x, y, inline=True, inline_spacing=5, transform=None):\n    if transform is None:\n        transform = self.axes.transData\n    if transform:\n        x, y = transform.transform((x, y))\n    idx_level_min, idx_vtx_min, proj = self._find_nearest_contour((x, y), self.labelIndiceList)\n    path = self._paths[idx_level_min]\n    level = self.labelIndiceList.index(idx_level_min)\n    label_width = self._get_nth_label_width(level)\n    rotation, path = self._split_path_and_get_label_rotation(path, idx_vtx_min, proj, label_width, inline_spacing)\n    self.add_label(*proj, rotation, self.labelLevelList[idx_level_min], self.labelCValueList[idx_level_min])\n    if inline:\n        self._paths[idx_level_min] = path",
    "docstring": "Add a label near the point `.TransformFalseFalse.IdentityTransform` should be interpreted as display coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:add_label_near arg:self arg:x arg:y arg:inline arg:inline_spacing arg:transform arguments arg arg arg arg arg arg If Compare Assign If Assign Call Assign Call Assign Assign Call Assign Call Assign Call Call If Assign"
  },
  {
    "library": "pytorch",
    "name": "ExtensionHandler",
    "source_code": "class ExtensionHandler:\n\n    @classmethod\n    def namespace(cls) -> str:\n        raise NotImplementedError(f'{cls.__class__} namespace() must be implemented')\n\n    @classmethod\n    def to_op_name(cls, op) -> str:\n        raise NotImplementedError(f'{cls.__class__} op_name() must be implemented')\n\n    @classmethod\n    def from_op_name(cls, name: str):\n        raise NotImplementedError(f'{cls.__class__} op_name() must be implemented')\n\n    @classmethod\n    def op_schema(cls, op) -> torch.FunctionSchema:\n        raise NotImplementedError(f'{cls.__class__} op_schema() must be implemented')",
    "docstring": "Base class for handling extension operators.",
    "type": "class",
    "file_path": "pytorch\\torch\\_export\\serde\\serialize.py",
    "ast_data": "ClassDef name:ExtensionHandler FunctionDef name:namespace arg:cls arguments arg Raise Call FunctionDef name:to_op_name arg:cls arg:op arguments arg arg Raise Call FunctionDef name:from_op_name arg:cls arg:name arguments arg arg Raise Call FunctionDef name:op_schema arg:cls arg:op arguments arg arg Raise Call"
  },
  {
    "library": "numpy",
    "name": "isalnum",
    "source_code": "def isalnum(self):\n    return isalnum(self)",
    "docstring": "Returns true for each element if all characters in the string are alphanumeric and there is at least one character, false otherwise. See Also -------- char.isalnum",
    "type": "method",
    "file_path": "numpy\\numpy\\_core\\defchararray.py",
    "ast_data": "FunctionDef name:isalnum arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_defun_call",
    "source_code": "@def_function.function\ndef _defun_call(self, inputs):\n    return self._make_op(inputs)",
    "docstring": "Wraps the op creation method in an Eager function for .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:_defun_call arg:self arg:inputs arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "mmread",
    "source_code": "def mmread(source, *, spmatrix=True):\n    cursor, stream_to_close = _get_read_cursor(source)\n    if cursor.header.format == 'array':\n        mat = _read_body_array(cursor)\n        if stream_to_close:\n            stream_to_close.close()\n        return mat\n    else:\n        triplet, shape = _read_body_coo(cursor, generalize_symmetry=True)\n        if stream_to_close:\n            stream_to_close.close()\n        if spmatrix:\n            return coo_matrix(triplet, shape=shape)\n        return coo_array(triplet, shape=shape)",
    "docstring": "Reads the contents of a Matrix Market file-like 'source' into a matrix. Parameters ---------- source : str or file-like Matrix Market filename (extensions .mtx, .mtz.gz) or open file-like object. spmatrix : bool, optional (default: True) If `threadpoolctl `_ to override: >>> import threadpoolctl >>> >>> with threadpoolctl.threadpool_limits(limits=2): ... m = mmread(StringIO(text), spmatrix=False)",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\_fast_matrix_market\\__init__.py",
    "ast_data": "FunctionDef name:mmread arg:source arguments arg arg Assign Call If Compare Assign Call If Call Return return:yes Assign Call If Call If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_list",
    "source_code": "def as_list(self):\n    if self._dims is None:\n        raise ValueError('as_list() is not defined on an unknown TensorShape.')\n    return list(self._dims)",
    "docstring": "Returns a list of integers or for each dimension. Returns: A list of integers or for each dimension. Raises: ValueError: If is an unknown shape with an unknown rank.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:as_list arg:self arguments arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "autocorrelation_plot",
    "source_code": "def autocorrelation_plot(series: Series, ax: Axes | None=None, **kwargs) -> Axes:\n    plot_backend = _get_plot_backend('matplotlib')\n    return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)",
    "docstring": "Autocorrelation plot for time series. Parameters ---------- series : Series The time series to visualize. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes The matplotlib axes containing the autocorrelation plot. See Also -------- Series.autocorr : Compute the lag-N autocorrelation for a Series. plotting.lag_plot : Lag plot for time series. Examples -------- The horizontal lines in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. .. plot:: :context: close-figs >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) >>> pd.plotting.autocorrelation_plot(s) # doctest: +SKIP",
    "type": "function",
    "file_path": "pandas\\pandas\\plotting\\_misc.py",
    "ast_data": "FunctionDef name:autocorrelation_plot arg:series arg:ax arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_navigate_mode",
    "source_code": "def set_navigate_mode(self, b):\n    self._navigate_mode = b",
    "docstring": "Set the navigation toolbar button status. .. warning:: This is not a user-API function.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_navigate_mode arg:self arg:b arguments arg arg Assign"
  },
  {
    "library": "seaborn",
    "name": "__init__",
    "source_code": "def __init__(self, plotter, sizes=None, order=None, norm=None):\n    super().__init__(plotter)\n    data = plotter.plot_data.get('size', pd.Series(dtype=float))\n    if data.notna().any():\n        map_type = self.infer_map_type(norm, sizes, plotter.var_types['size'])\n        if map_type == 'numeric':\n            levels, lookup_table, norm, size_range = self.numeric_mapping(data, sizes, norm)\n        elif map_type == 'categorical':\n            levels, lookup_table = self.categorical_mapping(data, sizes, order)\n            size_range = None\n        else:\n            levels, lookup_table = self.categorical_mapping(list(data), sizes, order)\n            size_range = None\n        self.map_type = map_type\n        self.levels = levels\n        self.norm = norm\n        self.sizes = sizes\n        self.size_range = size_range\n        self.lookup_table = lookup_table",
    "docstring": "Map the levels of the variable to distinct values. Parameters ---------- # TODO add generic parameters",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:plotter arg:sizes arg:order arg:norm arguments arg arg arg arg arg Call Call Assign Call Call If Call Call Assign Call If Compare Assign Call If Compare Assign Call Assign Assign Call Call Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "get_symm_mem_workspace",
    "source_code": "def get_symm_mem_workspace(group_name: str, min_size: int) -> _SymmetricMemory:\n    enable_symm_mem_for_group(group_name)\n    tensor = _group_name_to_workspace_tensor.get(group_name)\n    size = tensor.numel() * tensor.element_size() if tensor is not None else 0\n    if tensor is None or size < min_size:\n        if torch.cuda.is_current_stream_capturing():\n            curr_size = 0 if tensor is None else tensor.numel() * tensor.element_size()\n            raise RuntimeError(f'''get_symm_mem_workspace(): the requested size ({min_size} bytes) is greater than the size of the currently allocated workspace ({curr_size} bytes). It's currently not possible to expand the workspace size during graph capture. Please invoke `get_symm_mem_workspace(group_name=\"{group_name}\", min_size=\"{min_size}\")` before initiating the graph capture and try again.''')\n        tensor = _SymmetricMemory.empty_strided_p2p((max(size, min_size),), [1], torch.uint8, torch.device(f'cuda:{torch.cuda.current_device()}'), group_name)\n        _group_name_to_workspace_tensor[group_name] = tensor\n    return _SymmetricMemory.rendezvous(tensor)",
    "docstring": "Get the symmetric memory workspace associated with the process group. If ``, the workspace will be re-allocated and re-rendezvous'd. Args: group_name (str): the name of the process group. min_size (int): the size requirement for the workspace in bytes. Returns: _SymmetricMemory: the symmetric memory workspace associated with the group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:get_symm_mem_workspace arg:group_name arg:min_size arguments arg arg Call Assign Call Assign Compare Call Call If BoolOp Compare Compare If Call Assign Compare Call Call Raise Call Assign Call Call Call Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_str",
    "source_code": "def as_str(bytes_or_text, encoding='utf-8'):\n    return as_text(bytes_or_text, encoding)",
    "docstring": "Acts as an alias for the function.. Args: bytes_or_text: The input value to be converted. A bytes or unicode object. encoding: Optional string. The encoding to use if bytes_or_text is a bytes object. Defaults to 'utf-8'. Returns: A unicode string. Raises: TypeError: If bytes_or_text is not a bytes or unicode object. UnicodeDecodeError: If bytes_or_text is a bytes object and cannot be decoded using the specified encoding.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py",
    "ast_data": "FunctionDef name:as_str arg:bytes_or_text arg:encoding arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_raise_degree",
    "source_code": "@staticmethod\ndef _raise_degree(c, d):\n    if d == 0:\n        return c\n    k = c.shape[0] - 1\n    out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)\n    for a in range(c.shape[0]):\n        f = c[a] * comb(k, a)\n        for j in range(d + 1):\n            out[a + j] += f * comb(d, j) / comb(k + d, a + j)\n    return out",
    "docstring": "Raise a degree of a polynomial in the Bernstein basis. Given the coefficients of a polynomial degree , return (the coefficients of) the equivalent polynomial of degree . Parameters ---------- c : array_like coefficient array, 1-D d : integer Returns ------- array coefficient array, 1-D array of length Notes ----- This uses the fact that a Bernstein polynomial can be identically represented as a linear combination of polynomials of a higher degree : .. math:: b_{a, k} = comb(k, a) \\sum_{j=0}^{d} b_{a+j, k+d} \\ comb(d, j) / comb(k+d, a+j)",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_raise_degree arg:c arg:d arguments arg arg If Compare Return return:yes Assign Assign Call For Call Assign Call For Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "split",
    "source_code": "@tf_export('random.split', 'random.experimental.stateless_split')\n@dispatch.add_dispatch_support\ndef split(seed, num=2, alg='auto_select'):\n    seed = ops.convert_to_tensor(seed)\n    return stateless_random_uniform(shape=[num, 2], seed=seed, dtype=seed.dtype, minval=None, maxval=None, alg=alg)",
    "docstring": "Splits an RNG seed into new seeds by adding a leading axis. Example: >>> seed = [1, 2] >>> new_seeds = tf.random.split(seed, num=3) >>> print(new_seeds) tf.Tensor( [[1105988140 1738052849] [-335576002 370444179] [ 10670227 -246211131]], shape=(3, 2), dtype=int32) >>> tf.random.stateless_normal(shape=[3], seed=new_seeds[0, :]) Args: seed: an RNG seed (a tensor with shape [2] and dtype or ). (When using XLA, only is allowed.) num: optional, a positive integer or scalar tensor indicating the number of seeds to produce (default 2). alg: The RNG algorithm used to generate the random numbers. See for a detailed explanation. Returns: A tensor with shape [num, 2] representing new seeds. It will have the same dtype as (if doesn't have an explicit dtype, the dtype will be determined by ).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateless_random_ops.py",
    "ast_data": "FunctionDef name:split arg:seed arg:num arg:alg arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "__conform__",
    "source_code": "def __conform__(self, proto):\n    from psycopg2.extensions import ISQLQuote\n    if proto == ISQLQuote:\n        return self\n    else:\n        raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')",
    "docstring": "Does the given protocol conform to what Psycopg2 expects?",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\adapter.py",
    "ast_data": "FunctionDef name:__conform__ arg:self arg:proto arguments arg arg If Compare Return return:yes Raise Call"
  },
  {
    "library": "pandas",
    "name": "unbox_index",
    "source_code": "@unbox(IndexType)\ndef unbox_index(typ, obj, c):\n    data_obj = c.pyapi.object_getattr_string(obj, '_numba_data')\n    index = cgutils.create_struct_proxy(typ)(c.context, c.builder)\n    index.data = c.unbox(typ.as_array, data_obj).value\n    typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict))\n    arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype))\n    intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp))\n    hashmap_obj = c.pyapi.call_method(typed_dict_obj, 'empty', (arr_type_obj, intp_type_obj))\n    index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value\n    index.parent = obj\n    c.pyapi.decref(data_obj)\n    c.pyapi.decref(arr_type_obj)\n    c.pyapi.decref(intp_type_obj)\n    c.pyapi.decref(typed_dict_obj)\n    return NativeValue(index._getvalue())",
    "docstring": "Convert a Index object to a native structure. Note: Object dtype is not allowed here",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\_numba\\extensions.py",
    "ast_data": "FunctionDef name:unbox_index arg:typ arg:obj arg:c arguments arg arg arg Assign Call Assign Call Call Assign Call Assign Call Call Assign Call Call Assign Call Call Assign Call Assign Call Call Assign Call Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "reshard",
    "source_code": "def reshard(self) -> None:\n    state = self._get_fsdp_state()\n    if (fsdp_param_group := state._fsdp_param_group):\n        fsdp_param_group.reshard()",
    "docstring": "Reshards the module's parameters, freeing the unsharded parameters if they are allocated and registering the sharded parameters to the module. This method is *not* recursive.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:reshard arg:self arguments arg Assign Call If Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, ip, port, name=None):\n    self.ip = ip\n    self.port = port\n    if name is None:\n        name = ip\n    self.name = name",
    "docstring": "Initialize a TCP service representation.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\httputil.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ip arg:port arg:name arguments arg arg arg arg Assign Assign If Compare Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "reset_accumulated_host_memory_stats",
    "source_code": "def reset_accumulated_host_memory_stats() -> None:\n    return torch._C._cuda_resetAccumulatedHostMemoryStats()",
    "docstring": "Reset the \"accumulated\" (historical) stats tracked by the host memory allocator. See :func: for details. Accumulated stats correspond to the and keys in each individual stat dict.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:reset_accumulated_host_memory_stats arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "handle_app_config",
    "source_code": "def handle_app_config(self, app_config, **options):\n    raise NotImplementedError('Subclasses of AppCommand must provide a handle_app_config() method.')",
    "docstring": "Perform the command's actions for app_config, an AppConfig instance corresponding to an application label given on the command line.",
    "type": "method",
    "file_path": "django\\django\\core\\management\\base.py",
    "ast_data": "FunctionDef name:handle_app_config arg:self arg:app_config arguments arg arg arg Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "predict_log_proba",
    "source_code": "def predict_log_proba(self, X):\n    xp, _ = get_namespace(X)\n    prediction = self.predict_proba(X)\n    info = xp.finfo(prediction.dtype)\n    if hasattr(info, 'smallest_normal'):\n        smallest_normal = info.smallest_normal\n    else:\n        smallest_normal = info.tiny\n    prediction[prediction == 0.0] += smallest_normal\n    return xp.log(prediction)",
    "docstring": "Estimate log probability. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- C : ndarray of shape (n_samples, n_classes) Estimated log probabilities.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:predict_log_proba arg:self arg:X arguments arg arg Assign Call Assign Call Assign Call If Call Assign Assign Compare Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "OrtOperatorSupport",
    "source_code": "class OrtOperatorSupport(OperatorSupport):\n\n    def __init__(self, support_dict: set[Any], extra_support_dict: dict[str, Any]):\n        super().__init__(extra_support_dict)\n        self._onnx_support_dict = support_dict\n\n    def is_node_supported(self, submodules: Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool:\n        if node.op not in CALLABLE_NODE_OPS:\n            return False\n        if node.op == 'call_function' and node.target in self._onnx_support_dict:\n            logger.info('support_dict supports node.target: %s (type: %s)', node.target, type(node.target))\n            return True\n        if super().is_node_supported(submodules, node):\n            logger.info('extra_support_dict supports node.target: %s (type: %s)', node.target, type(node.target))\n            return True\n        logger.warning(\"support_dict and extra_support_dict don't support node.target: %s (type: %s)\", node.target, type(node.target))\n        return False",
    "docstring": "Operator support for ONNXRuntime backend. It has two-level of support decision. One is via support_dict and the other one is via extra_support_dict. The logic of using support_dict is implemented in OrtOperatorSupport and extra_support_dict is used by OperatorSupport.is_node_supported.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py",
    "ast_data": "ClassDef name:OrtOperatorSupport FunctionDef name:__init__ arg:self arg:support_dict arg:extra_support_dict arguments arg arg arg Call Call Assign FunctionDef name:is_node_supported arg:self arg:submodules arg:node arguments arg arg arg If Compare Return return:yes If BoolOp Compare Compare Call Call Return return:yes If Call Call Call Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "RgbaToBgr",
    "source_code": "class RgbaToBgr(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 4, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n\n    def forward(self, image: Tensor) -> Tensor:\n        return rgba_to_bgr(image)",
    "docstring": "Convert an image from RGBA to BGR. Remove an alpha channel from BGR image. Returns: BGR version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 4, 4, 5) >>> rgba = RgbaToBgr() >>> output = rgba(input) # 2x3x4x5",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\rgb.py",
    "ast_data": "ClassDef name:RgbaToBgr FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "feature_c_preprocessor",
    "source_code": "def feature_c_preprocessor(self, feature_name, tabs=0):\n    assert feature_name.isupper()\n    feature = self.feature_supported.get(feature_name)\n    assert feature is not None\n    prepr = ['/** %s **/' % feature_name, '#define %sHAVE_%s 1' % (self.conf_c_prefix, feature_name)]\n    prepr += ['#include <%s>' % h for h in feature.get('headers', [])]\n    extra_defs = feature.get('group', [])\n    extra_defs += self.feature_extra_checks(feature_name)\n    for edef in extra_defs:\n        prepr += ['#ifndef %sHAVE_%s' % (self.conf_c_prefix, edef), '\\t#define %sHAVE_%s 1' % (self.conf_c_prefix, edef), '#endif']\n    if tabs > 0:\n        prepr = ['\\t' * tabs + l for l in prepr]\n    return '\\n'.join(prepr)",
    "docstring": "Generate C preprocessor definitions and include headers of a CPU feature. Parameters ---------- 'feature_name': str CPU feature name in uppercase. 'tabs': int if > 0, align the generated strings to the right depend on number of tabs. Returns ------- str, generated C preprocessor Examples -------- >>> self.feature_c_preprocessor(\"SSE3\") /** SSE3 **/ #define NPY_HAVE_SSE3 1 #include",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py",
    "ast_data": "FunctionDef name:feature_c_preprocessor arg:self arg:feature_name arg:tabs arguments arg arg arg Call Assign Call Compare Assign Call Assign Call Call For If Compare Assign Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_is_wrapped_coroutine",
    "source_code": "def _is_wrapped_coroutine(obj: Any) -> bool:\n    if isstaticmethod(obj) or isclassmethod(obj) or ispartial(obj):\n        return False\n    return hasattr(obj, '__wrapped__')",
    "docstring": "Check if the object is wrapped coroutine-function.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\inspect.py",
    "ast_data": "FunctionDef name:_is_wrapped_coroutine arg:obj arguments arg If BoolOp Call Call Call Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_update_save_for_backward",
    "source_code": "def _update_save_for_backward(self, new_node: torch.fx.Node) -> None:\n    output_node = None\n    for user in self.reduce_scatter_node.users:\n        if user.target == 'output':\n            output_node = user\n            break\n    if output_node is not None:\n        output_node.replace_input_with(self.reduce_scatter_node, new_node)\n        assert len(self.reduce_scatter_node.users) == 1, 'Reduce scatter node has multiple users, this is not expected'",
    "docstring": "If the output node is a user of the reduce_scatter node (indicating the reduce_scatter result is saved for backward), this method will update the output node to use the fused node instead.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\micro_pipeline_tp.py",
    "ast_data": "FunctionDef name:_update_save_for_backward arg:self arg:new_node arguments arg arg Assign For If Compare Assign If Compare Call Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "new_locator",
    "source_code": "def new_locator(self, nx, nx1=None):\n    return super().new_locator(nx, 0, nx1, 0)",
    "docstring": "Create an axes locator callable for the specified cell. Parameters ---------- nx, nx1 : int Integers specifying the column-position of the cell. When *nx1* is None, a single *nx*-th column is specified. Otherwise, location of columns spanning between *nx* to *nx1* (but excluding *nx1*-th column) is specified.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:new_locator arg:self arg:nx arg:nx1 arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "recursively_deserialize_keras_object",
    "source_code": "def recursively_deserialize_keras_object(config, module_objects=None):\n    if isinstance(config, dict):\n        if 'class_name' in config:\n            return generic_utils.deserialize_keras_object(config, module_objects=module_objects)\n        else:\n            return {key: recursively_deserialize_keras_object(config[key], module_objects) for key in config}\n    if isinstance(config, (tuple, list)):\n        return [recursively_deserialize_keras_object(x, module_objects) for x in config]\n    else:\n        raise ValueError('Unable to decode config: {}'.format(config))",
    "docstring": "Deserialize Keras object from a nested structure.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\load.py",
    "ast_data": "FunctionDef name:recursively_deserialize_keras_object arg:config arg:module_objects arguments arg arg If Call If Compare Return return:yes Call Return return:yes Call If Call Return return:yes Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_sum_states",
    "source_code": "def _sum_states(idx, states):\n    with ops.name_scope('sum_states'):\n        idx = ops.convert_to_tensor(idx, name='idx')\n        num_states = _get_dim(states, 2)\n        states = array_ops.expand_dims(states, axis=2)\n        one_hot = array_ops.one_hot(idx, depth=num_states, on_value=0.0, off_value=math_ops.log(0.0), axis=1)\n        return math_ops.reduce_logsumexp(states + one_hot, axis=-1)",
    "docstring": "Take logsumexp for each unique state out of all label states. Args: idx: tensor of shape [batch, label_length] For each sequence, indices into a set of unique labels as computed by calling unique. states: tensor of shape [frames, batch, label_length] Log probabilities for each label state. Returns: tensor of shape [frames, batch_size, label_length], log probabilities summed for each unique label of the sequence.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ctc_ops.py",
    "ast_data": "FunctionDef name:_sum_states arg:idx arg:states arguments arg arg With Call Assign Call Assign Call Assign Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "step_function",
    "source_code": "def step_function(model, iterator):\n\n    def run_step(data):\n        outputs = model.test_step(data)\n        with ops.control_dependencies(_minimum_control_deps(outputs)):\n            model._test_counter.assign_add(1)\n        return outputs\n    data = next(iterator)\n    outputs = model.distribute_strategy.run(run_step, args=(data,))\n    outputs = reduce_per_replica(outputs, self.distribute_strategy, reduction='first')\n    return outputs",
    "docstring": "Runs a single evaluation step.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:step_function arg:model arg:iterator arguments arg arg FunctionDef name:run_step arg:data arguments arg Assign Call With Call Call Call Return return:yes Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "remove_rubberband",
    "source_code": "def remove_rubberband(self):\n    pass",
    "docstring": "Remove rubberband. This method should get implemented per backend.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py",
    "ast_data": "FunctionDef name:remove_rubberband arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_propagate_module_bias",
    "source_code": "def _propagate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:\n    if module.bias is not None:\n        module.bias = nn.Parameter(cast(Tensor, module.bias)[mask])\n    elif getattr(module, '_bias', None) is not None:\n        module.bias = nn.Parameter(cast(Tensor, module._bias)[mask])\n    if getattr(module, '_bias', None) is not None:\n        pruned_biases = cast(Tensor, module._bias)[~mask]\n    else:\n        pruned_biases = None\n    if hasattr(module, '_bias'):\n        delattr(module, '_bias')\n    return pruned_biases",
    "docstring": "In the case that we need to propagate biases, this function will return the biases we need",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\prune_functions.py",
    "ast_data": "FunctionDef name:_propagate_module_bias arg:module arg:mask arguments arg arg If Compare Assign Call Call If Compare Call Assign Call Call If Compare Call Assign Call Assign If Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "describe",
    "source_code": "def describe(self) -> DataFrame:\n    counts = self.value_counts(dropna=False)\n    freqs = counts / counts.sum()\n    from pandas import Index\n    from pandas.core.reshape.concat import concat\n    result = concat([counts, freqs], ignore_index=True, axis=1)\n    result.columns = Index(['counts', 'freqs'])\n    result.index.name = 'categories'\n    return result",
    "docstring": "Describes this Categorical Returns ------- description: A dataframe with frequency and counts by category.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\categorical.py",
    "ast_data": "FunctionDef name:describe arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "CSEVariable",
    "source_code": "class CSEVariable:\n\n    def __init__(self, name: str, bounds: ValueRanges[Any], dtype: Optional[torch.dtype]=None):\n        super().__init__()\n        assert isinstance(bounds, ValueRanges), type(bounds)\n        self.name = name\n        self.bounds = bounds\n        self.use_count = 1\n        self.dtype = dtype\n\n    def __str__(self) -> str:\n        return self.name\n\n    def __hash__(self) -> int:\n        return hash(self.name)\n\n    def __eq__(self, other: object) -> bool:\n        return isinstance(other, CSEVariable) and other.name == self.name\n\n    def update_on_args(self, name: str, args: Any, kwargs: Any) -> None:\n        pass\n\n    def __repr__(self) -> str:\n        return f'{self.__class__.__name__}({self.name!r})'",
    "docstring": "A CSEVariable is just a name for an expression but it is useful to be able to annotate them on a backend dependent basis. To do so, the backends can simply overload The \"CSEVariable.update_on_args\" method gives you a hook for annotations See example of TritonCSEVariable in triton.py",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py",
    "ast_data": "ClassDef name:CSEVariable FunctionDef name:__init__ arg:self arg:name arg:bounds arg:dtype arguments arg arg arg arg Call Call Call Call Assign Assign Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes FunctionDef name:__hash__ arg:self arguments arg Return return:yes Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare FunctionDef name:update_on_args arg:self arg:name arg:args arg:kwargs arguments arg arg arg arg FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_add_deprecated_arg_notice_to_docstring",
    "source_code": "def _add_deprecated_arg_notice_to_docstring(doc, date, instructions, deprecated_names):\n    deprecation_string = ', '.join(sorted(deprecated_names))\n    return decorator_utils.add_notice_to_docstring(doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS', '(deprecated arguments)', ['SOME ARGUMENTS ARE DEPRECATED: `(%s)`. They will be removed %s.' % (deprecation_string, 'in a future version' if date is None else 'after %s' % date), 'Instructions for updating:'], notice_type='Deprecated')",
    "docstring": "Adds a deprecation notice to a docstring for deprecated arguments.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py",
    "ast_data": "FunctionDef name:_add_deprecated_arg_notice_to_docstring arg:doc arg:date arg:instructions arg:deprecated_names arguments arg arg arg arg Assign Call Call Return return:yes Call Compare"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, callable, name=None):\n    Tool.__init__(self, 'before_handler', callable, name)",
    "docstring": "Initialize a handler tool.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cptools.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:callable arg:name arguments arg arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "ConcaterIterDataPipe",
    "source_code": "@functional_datapipe('concat')\nclass ConcaterIterDataPipe(IterDataPipe):\n    datapipes: tuple[IterDataPipe]\n\n    def __init__(self, *datapipes: IterDataPipe):\n        if len(datapipes) == 0:\n            raise ValueError('Expected at least one DataPipe, but got nothing')\n        if not all((isinstance(dp, IterDataPipe) for dp in datapipes)):\n            raise TypeError('Expected all inputs to be `IterDataPipe`')\n        self.datapipes = datapipes\n\n    def __iter__(self) -> Iterator:\n        for dp in self.datapipes:\n            yield from dp\n\n    def __len__(self) -> int:\n        if all((isinstance(dp, Sized) for dp in self.datapipes)):\n            return sum((len(dp) for dp in self.datapipes))\n        else:\n            raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")",
    "docstring": "Concatenates multiple Iterable DataPipes (functional name: ``). The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. Args: datapipes: Iterable DataPipes being concatenated Example: >>> # xdoctest: +REQUIRES(module:torchdata) >>> import random >>> from torchdata.datapipes.iter import IterableWrapper >>> dp1 = IterableWrapper(range(3)) >>> dp2 = IterableWrapper(range(5)) >>> list(dp1.concat(dp2)) [0, 1, 2, 0, 1, 2, 3, 4]",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "ClassDef name:ConcaterIterDataPipe FunctionDef name:__init__ arg:self arguments arg arg If Compare Call Raise Call If Call Call Raise Call Assign FunctionDef name:__iter__ arg:self arguments arg For FunctionDef name:__len__ arg:self arguments arg If Call Call Return return:yes Call Call Raise Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "build_nccl_then_shuffle",
    "source_code": "def build_nccl_then_shuffle(input_tensors, gather_devices, nccl_red_op, shuffle_red_op, un_op=None):\n\n    def upper_level_f(x):\n        return build_shuffle_all_reduce(x, gather_devices, shuffle_red_op, un_op)\n    return _build_nccl_hybrid(input_tensors, nccl_red_op, upper_level_f)",
    "docstring": "Construct hybrid of NCCL within workers, Shuffle across workers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py",
    "ast_data": "FunctionDef name:build_nccl_then_shuffle arg:input_tensors arg:gather_devices arg:nccl_red_op arg:shuffle_red_op arg:un_op arguments arg arg arg arg arg FunctionDef name:upper_level_f arg:x arguments arg Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "DixonPrice",
    "source_code": "class DixonPrice(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))\n        self.custom_bounds = [(-2, 3), (-2, 3)]\n        self.global_optimum = [[2.0 ** (-(2.0 ** i - 2.0) / 2.0 ** i) for i in range(1, self.N + 1)]]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        i = arange(2, self.N + 1)\n        s = i * (2.0 * x[1:] ** 2.0 - x[:-1]) ** 2.0\n        return sum(s) + (x[0] - 1.0) ** 2.0",
    "docstring": "Dixon and Price objective function. This class defines the Dixon and Price global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{DixonPrice}}(x) = (x_i - 1)^2 + \\sum_{i=2}^n i(2x_i^2 - x_{i-1})^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Gavana code not correct. i array should start from 2.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_D.py",
    "ast_data": "ClassDef name:DixonPrice Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "polar",
    "source_code": "def polar(*args, **kwargs) -> list[Line2D]:\n    if gcf().get_axes():\n        ax = gca()\n        if not isinstance(ax, PolarAxes):\n            _api.warn_deprecated('3.10', message=\"There exists a non-polar current Axes. Therefore, the resulting plot from 'polar()' is non-polar. You likely should call 'polar()' before any other pyplot plotting commands. Support for this scenario is deprecated in %(since)s and will raise an error in %(removal)s\")\n    else:\n        ax = axes(projection='polar')\n    return ax.plot(*args, **kwargs)",
    "docstring": "Make a polar plot. call signature:: polar(theta, r, [fmt], **kwargs) This is a convenience wrapper around . It ensures that the current Axes is polar (or creates one if needed) and then passes all parameters to `pyplot API ` call will fail.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py",
    "ast_data": "FunctionDef name:polar arguments arg arg If Call Call Assign Call If Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_run",
    "source_code": "@abstractmethod\ndef _run(self):\n    raise NotImplementedError",
    "docstring": "Submits request to the executor and queue the objects.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py",
    "ast_data": "FunctionDef name:_run arg:self arguments arg Raise"
  },
  {
    "library": "tensorflow",
    "name": "source_file_list",
    "source_code": "def source_file_list(self):\n    return tuple(self._host_name_file_path_to_offset.keys())",
    "docstring": "Get a list of source files known to the debugger data reader. Returns: A tuple of tuples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:source_file_list arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "MutationType",
    "source_code": "class MutationType:\n\n    def __init__(self, typ: SourceType) -> None:\n        if typ is SourceType.Existing:\n            self.scope = 0\n        elif typ is SourceType.New:\n            self.scope = current_scope_id()\n        else:\n            unimplemented_v2(gb_type='Unsupported SourceType', context=f'MutationType.__init__ {self} {typ}', explanation=f'Dynamo does not support the type `{typ}`', hints=['This branch is not supposed to be reachable.', *graph_break_hints.DYNAMO_BUG])",
    "docstring": "Base class for Variable.mutation_type. It encodes information about 1. The type of mutation Dynamo allows on the variable. 2. Whether the value represented by this variable already existed before Dynamo tracing.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "ClassDef name:MutationType FunctionDef name:__init__ arg:self arg:typ arguments arg arg If Compare Assign If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_save_spec",
    "source_code": "def get_save_spec(model):\n    shapes_dict = getattr(model, '_build_shapes_dict', None)\n    if not shapes_dict:\n        return None\n    if 'input_shape' not in shapes_dict:\n        raise ValueError('Model {} cannot be saved because the input shapes have not been set.')\n    input_shape = shapes_dict['input_shape']\n    if isinstance(input_shape, tuple):\n        shape = input_shape\n        shape = (None,) + shape[1:]\n        return tensor_spec.TensorSpec(shape=shape, dtype=model.input_dtype)\n    elif isinstance(input_shape, dict):\n        specs = {}\n        for key, shape in input_shape.items():\n            shape = (None,) + shape[1:]\n            specs[key] = tensor_spec.TensorSpec(shape=shape, dtype=model.input_dtype, name=key)\n        return specs\n    elif isinstance(input_shape, list):\n        specs = []\n        for shape in input_shape:\n            shape = (None,) + shape[1:]\n            specs.append(tensor_spec.TensorSpec(shape=shape, dtype=model.input_dtype))\n        return specs",
    "docstring": "Returns the save spec of the subclassing keras model.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\tflite_keras_util.py",
    "ast_data": "FunctionDef name:get_save_spec arg:model arguments arg Assign Call If Return return:no If Compare Raise Call Assign If Call Assign Assign Return return:yes Call If Call Assign For Call Assign Assign Call Return return:yes If Call Assign For Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "AverageMeter",
    "source_code": "class AverageMeter:\n    val: Union[float, bool, Tensor]\n    _avg: Union[float, Tensor]\n    sum: Union[float, Tensor]\n    count: int\n\n    def __init__(self) -> None:\n        self.reset()\n\n    def reset(self) -> None:\n        self.val = 0\n        self._avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val: Union[float, bool, Tensor], n: int=1) -> None:\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self._avg = self.sum / self.count\n\n    @property\n    def avg(self) -> float:\n        if isinstance(self._avg, Tensor):\n            return float(self._avg.item())\n        return self._avg",
    "docstring": "Computes and stores the average and current value. Example: >>> stats = AverageMeter() >>> acc1 = torch.tensor(0.99) # coming from K.metrics.accuracy >>> stats.update(acc1, n=1) # where n is batch size usually >>> round(stats.avg, 2) 0.99",
    "type": "class",
    "file_path": "kornia\\kornia\\metrics\\average_meter.py",
    "ast_data": "ClassDef name:AverageMeter FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:reset arg:self arguments arg Assign Assign Assign Assign FunctionDef name:update arg:self arg:val arg:n arguments arg arg arg Assign Assign FunctionDef name:avg arg:self arguments arg If Call Return return:yes Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "minorticks_on",
    "source_code": "def minorticks_on(self):\n    self.ax.minorticks_on()\n    self._short_axis().set_minor_locator(ticker.NullLocator())",
    "docstring": "Turn on colorbar minor ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:minorticks_on arg:self arguments arg Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "set_asyncio_event_loop_policy",
    "source_code": "def set_asyncio_event_loop_policy() -> None:\n    _get_asyncio_event_loop_policy()",
    "docstring": "The policy functions from asyncio often behave unexpectedly, so we restrict their use to the absolutely essential case. This should only be used to install the reactor.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\reactor.py",
    "ast_data": "FunctionDef name:set_asyncio_event_loop_policy arguments Call"
  },
  {
    "library": "pytorch",
    "name": "scale_grads",
    "source_code": "def scale_grads(self, grad_scale_factor: int) -> None:\n    if grad_scale_factor != 1:\n        for p in self.submod.parameters():\n            if p.grad is not None:\n                p.grad.div_(grad_scale_factor)",
    "docstring": "Scale gradients model gradients by , which should be specified in coordination with the loss function used with pipelining. For loss functions which perform 'mean' loss reduction, should be set to num_microbatches. For loss functions that use reduction, should be set to 1. Should only be called once per pipeline schedule step, after all backwards passes have completed.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:scale_grads arg:self arg:grad_scale_factor arguments arg arg If Compare For Call If Compare Call"
  },
  {
    "library": "django",
    "name": "MultiJoin",
    "source_code": "class MultiJoin(Exception):\n\n    def __init__(self, names_pos, path_with_names):\n        self.level = names_pos\n        self.names_with_path = path_with_names",
    "docstring": "Used by join construction code to indicate the point at which a multi-valued join was attempted (if the caller wants to treat that exceptionally).",
    "type": "class",
    "file_path": "django\\django\\db\\models\\sql\\datastructures.py",
    "ast_data": "ClassDef name:MultiJoin FunctionDef name:__init__ arg:self arg:names_pos arg:path_with_names arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "aggregate_global_cache",
    "source_code": "def aggregate_global_cache(self, global_tt_summary_cache):\n    agg_fn_map = self._parameters.get_signature_to_agg_fn_map()\n    signature_idx_map = self._signature_types()\n    aggregation_result = []\n    for signature, idx in sorted(signature_idx_map.items(), key=operator.itemgetter(1)):\n        if signature not in agg_fn_map:\n            raise RuntimeError('No aggregation function is defined for signature %s.' % signature)\n        signature_tensor = global_tt_summary_cache[:, :, idx]\n        agg_fn = agg_fn_map[signature]\n        agg_tensor = agg_fn(signature_tensor, axis=0)\n        aggregation_result.append(agg_tensor)\n    merged_signatures = array_ops_stack.stack(aggregation_result)\n    transposed_signatures = array_ops.transpose(merged_signatures)\n    return array_ops.expand_dims(transposed_signatures, axis=0)",
    "docstring": "Merges the given caches on tpu. Args: global_tt_summary_cache: The global tensor tracer summary cache tensor with shape (num_cores, num_traced_tensors, num_traced_signatures). First dimension corresponds to core_id, where global_tpu_cache_tensor[i] correspond to the local cache from core-i. Returns: An aggregated tf.Tensor. Raises: RuntimeError: if there is no aggregate function defined for a signature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:aggregate_global_cache arg:self arg:global_tt_summary_cache arguments arg arg Assign Call Assign Call Assign For Call Call Call If Compare Raise Call Assign Assign Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_unshard_in_backward",
    "source_code": "def set_unshard_in_backward(self, unshard_in_backward: bool) -> None:\n    state = self._get_fsdp_state()\n    if (fsdp_param_group := state._fsdp_param_group) is not None:\n        fsdp_param_group.unshard_in_backward = unshard_in_backward",
    "docstring": "Sets whether the FSDP module's parameters need to be unsharded in backward. This can be used in expert cases when the user knows that all parameters in this FSDP module's parameter group are not needed for backward computation (e.g. embedding).",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_unshard_in_backward arg:self arg:unshard_in_backward arguments arg arg Assign Call If Compare Assign"
  },
  {
    "library": "kornia",
    "name": "_create_pixels_grid",
    "source_code": "def _create_pixels_grid(self) -> tuple[Tensor, Tensor]:\n    height, width = self._image_size\n    pixels_grid: Tensor = create_meshgrid(height, width, normalized_coordinates=False, device=self._device, dtype=self._dtype)\n    pixels_grid = pixels_grid.reshape(-1, 2)\n    ones = torch.ones(pixels_grid.shape[0], 1, device=pixels_grid.device, dtype=pixels_grid.dtype)\n    return (pixels_grid, ones)",
    "docstring": "Create the pixels grid to unproject to plane z=1. Args: image_size: image size: tuple[int, int] Returns: - Pixels grid: Tensor (1, H, W, 2) - Ones: Tensor (H*W, 1)",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\nerf_model.py",
    "ast_data": "FunctionDef name:_create_pixels_grid arg:self arguments arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "blankout",
    "source_code": "def blankout(src, char):\n    return dot_re.sub(char, src)",
    "docstring": "Change every non-whitespace character to the given char. Used in the templatize function.",
    "type": "function",
    "file_path": "django\\django\\utils\\translation\\template.py",
    "ast_data": "FunctionDef name:blankout arg:src arg:char arguments arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "maybe_convert_platform",
    "source_code": "def maybe_convert_platform(values: list | tuple | range | np.ndarray | ExtensionArray) -> ArrayLike:\n    arr: ArrayLike\n    if isinstance(values, (list, tuple, range)):\n        arr = construct_1d_object_array_from_listlike(values)\n    else:\n        arr = values\n    if arr.dtype == _dtype_obj:\n        arr = cast(np.ndarray, arr)\n        arr = lib.maybe_convert_objects(arr)\n    return arr",
    "docstring": "try to do platform conversion, allow ndarray or list here",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:maybe_convert_platform arg:values arguments arg If Call Assign Call Assign If Compare Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "gather",
    "source_code": "def gather(self, indices, name=None):\n    return self._implementation.gather(indices, name=name)",
    "docstring": "Return selected values in the TensorArray as a packed . All of selected values must have been written and their shapes must all match. Args: indices: A taking values in . If the is not dynamic, . name: A name for the operation (optional). Returns: The tensors in the selected by , packed into one tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py",
    "ast_data": "FunctionDef name:gather arg:self arg:indices arg:name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_try_cast",
    "source_code": "def _try_cast(arr: list | np.ndarray, dtype: np.dtype, copy: bool) -> ArrayLike:\n    is_ndarray = isinstance(arr, np.ndarray)\n    if dtype == object:\n        if not is_ndarray:\n            subarr = construct_1d_object_array_from_listlike(arr)\n            return subarr\n        return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy)\n    elif dtype.kind == 'U':\n        if is_ndarray:\n            arr = cast(np.ndarray, arr)\n            shape = arr.shape\n            if arr.ndim > 1:\n                arr = arr.ravel()\n        else:\n            shape = (len(arr),)\n        return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(shape)\n    elif dtype.kind in 'mM':\n        if is_ndarray:\n            arr = cast(np.ndarray, arr)\n            if arr.ndim == 2 and arr.shape[1] == 1:\n                return maybe_cast_to_datetime(arr[:, 0], dtype).reshape(arr.shape)\n        return maybe_cast_to_datetime(arr, dtype)\n    elif dtype.kind in 'iu':\n        subarr = maybe_cast_to_integer_array(arr, dtype)\n    elif not copy:\n        subarr = np.asarray(arr, dtype=dtype)\n    else:\n        subarr = np.array(arr, dtype=dtype, copy=copy)\n    return subarr",
    "docstring": "Convert input to numpy ndarray and optionally cast to a given dtype. Parameters ---------- arr : ndarray or list Excludes: ExtensionArray, Series, Index. dtype : np.dtype copy : bool If False, don't copy the data if not needed. Returns ------- np.ndarray or ExtensionArray",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\construction.py",
    "ast_data": "FunctionDef name:_try_cast arg:arr arg:dtype arg:copy arguments arg arg arg Assign Call If Compare If Assign Call Return return:yes Return return:yes Call Call If Compare If Assign Call Assign If Compare Assign Call Assign Call Return return:yes Call Call If Compare If Assign Call If BoolOp Compare Compare Return return:yes Call Call Return return:yes Call If Compare Assign Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "EagerGraphCombination",
    "source_code": "class EagerGraphCombination(test_combinations.TestCombination):\n\n    def context_managers(self, kwargs):\n        mode = kwargs.pop('mode', None)\n        if mode is None:\n            return []\n        elif mode == 'eager':\n            return [context.eager_mode()]\n        elif mode == 'graph':\n            return [ops.Graph().as_default(), context.graph_mode()]\n        else:\n            raise ValueError(f\"Argument 'mode' must be either 'eager' or 'graph'. Received: {mode}.\")\n\n    def parameter_modifiers(self):\n        return [test_combinations.OptionalParameter('mode')]",
    "docstring": "Run the test in Graph or Eager mode. The optional parameter controls the test's execution mode. Its accepted values are \"graph\" or \"eager\" literals.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\combinations.py",
    "ast_data": "ClassDef name:EagerGraphCombination FunctionDef name:context_managers arg:self arg:kwargs arguments arg arg Assign Call If Compare Return return:no If Compare Return return:yes Call If Compare Return return:yes Call Call Call Raise Call FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_reset_locator_formatter_scale",
    "source_code": "def _reset_locator_formatter_scale(self):\n    self._process_values()\n    self._locator = None\n    self._minorlocator = None\n    self._formatter = None\n    self._minorformatter = None\n    if isinstance(self.mappable, contour.ContourSet) and isinstance(self.norm, colors.LogNorm):\n        self._set_scale('log')\n    elif self.boundaries is not None or isinstance(self.norm, colors.BoundaryNorm):\n        if self.spacing == 'uniform':\n            funcs = (self._forward_boundaries, self._inverse_boundaries)\n            self._set_scale('function', functions=funcs)\n        elif self.spacing == 'proportional':\n            self._set_scale('linear')\n    elif getattr(self.norm, '_scale', None):\n        self._set_scale(self.norm._scale)\n    elif type(self.norm) is colors.Normalize:\n        self._set_scale('linear')\n    else:\n        funcs = (self.norm, self.norm.inverse)\n        self._set_scale('function', functions=funcs)",
    "docstring": "Reset the locator et al to defaults. Any user-hardcoded changes need to be re-entered if this gets called (either at init, or when the mappable normal gets changed: Colorbar.update_normal)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py",
    "ast_data": "FunctionDef name:_reset_locator_formatter_scale arg:self arguments arg Call Assign Assign Assign Assign If BoolOp Call Call Call If BoolOp Compare Call If Compare Assign Call If Compare Call If Call Call If Compare Call Call Assign Call"
  },
  {
    "library": "pandas",
    "name": "infer_compression",
    "source_code": "@doc(compression_options=_shared_docs['compression_options'] % 'filepath_or_buffer')\ndef infer_compression(filepath_or_buffer: FilePath | BaseBuffer, compression: str | None) -> str | None:\n    if compression is None:\n        return None\n    if compression == 'infer':\n        if isinstance(filepath_or_buffer, str) and '::' in filepath_or_buffer:\n            filepath_or_buffer = filepath_or_buffer.split('::')[0]\n        filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)\n        if not isinstance(filepath_or_buffer, str):\n            return None\n        for extension, compression in extension_to_compression.items():\n            if filepath_or_buffer.lower().endswith(extension):\n                return compression\n        return None\n    if compression in _supported_compressions:\n        return compression\n    valid = ['infer', None] + sorted(_supported_compressions)\n    msg = f'Unrecognized compression type: {compression}\\nValid compression types are {valid}'\n    raise ValueError(msg)",
    "docstring": "Get the compression method for filepath_or_buffer. If compression='infer', the inferred compression method is returned. Otherwise, the input compression method is returned unchanged, unless it's invalid, in which case an error is raised. Parameters ---------- filepath_or_buffer : str or file handle File path or object. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. Returns ------- string or None Raises ------ ValueError on invalid compression specified.",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\common.py",
    "ast_data": "FunctionDef name:infer_compression arg:filepath_or_buffer arg:compression arguments arg arg If Compare Return return:no If Compare If BoolOp Call Compare Assign Call Assign Call If Call Return return:no For Call If Call Call Return return:yes Return return:no If Compare Return return:yes Assign Call Assign Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "group",
    "source_code": "@tf_export('group')\ndef group(*inputs, **kwargs):\n    if context.executing_eagerly():\n        return None\n    name = kwargs.pop('name', None)\n    if kwargs:\n        raise ValueError('Unknown keyword arguments: ' + ', '.join(kwargs.keys()))\n    with ops.name_scope(name, 'group_deps', inputs) as name:\n        if not inputs:\n            return no_op(name=name)\n        ops_on_device = {}\n        for inp in nest.flatten(inputs, expand_composites=True):\n            if not hasattr(inp, 'device'):\n                raise TypeError(f\"'inputs' should be zero or more (nested) Tensors. Received '{inp}' with type '{type(inp)}'.\")\n            dev = inp.device\n            if dev in ops_on_device:\n                ops_on_device[dev].append(inp)\n            else:\n                ops_on_device[dev] = [inp]\n        if len(ops_on_device) == 1:\n            (dev, deps), = ops_on_device.items()\n            return _GroupControlDeps(dev, deps, name=name)\n        deps = []\n\n        def device_key(dev):\n            return '' if dev is None else dev\n        for dev in sorted(ops_on_device, key=device_key):\n            deps.append(_GroupControlDeps(dev, ops_on_device[dev]))\n        with ops.control_dependencies(deps):\n            return no_op(name=name)",
    "docstring": "Create an op that groups multiple operations. When this op finishes, all ops in have finished. This op has no output. Note: *In TensorFlow 2 with eager and/or Autograph, you should not require this method, as ops execute in the expected order thanks to automatic control dependencies.* Only use when working with v1 code. When operating in a v1-style graph context, ops are not executed in the same order as specified in the code; TensorFlow will attempt to execute ops in parallel or in an order convenient to the result it is computing. allows you to request that one or more results finish before execution continues. creates a single op (of type ), and then adds appropriate control dependencies. Thus, will compute the same graph as this: with tf.control_dependencies([a, b]): c = tf.no_op() See also and . Args: *inputs: Zero or more tensors to group. name: A name for this operation (optional). Returns: An Operation that executes all its inputs. Raises: ValueError: If an unknown keyword argument is provided.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py",
    "ast_data": "FunctionDef name:group arguments arg arg If Call Return return:no Assign Call If Raise Call Call Call With Call If Return return:yes Call Assign For Call If Call Raise Call Call Assign If Compare Call Assign If Compare Call Assign Call Return return:yes Call Assign FunctionDef name:device_key arg:dev arguments arg Return return:yes Compare For Call Call Call With Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "isbuiltin",
    "source_code": "def isbuiltin(object):\n    return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.isbuiltin.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:isbuiltin arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "next_iter",
    "source_code": "def next_iter(self):\n    self._iter += 1\n    self.handles_post_forward_order.clear()\n    if self._checking_order:\n        self.current_order_index = 0\n        if self.warn_status == _ExecOrderWarnStatus.WARNING:\n            self.warn_status = _ExecOrderWarnStatus.WARNED",
    "docstring": "Advances the internal data structures per iteration. This should be called in the post-backward callback since that marks the true end of an iteration.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_exec_order_utils.py",
    "ast_data": "FunctionDef name:next_iter arg:self arguments arg Call If Assign If Compare Assign"
  },
  {
    "library": "pandas",
    "name": "add_tmp",
    "source_code": "def add_tmp(self, value) -> str:\n    name = f'{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}'\n    assert name not in self.temps\n    self.temps[name] = value\n    assert name in self.temps\n    return name",
    "docstring": "Add a temporary variable to the scope. Parameters ---------- value : object An arbitrary object to be assigned to a temporary variable. Returns ------- str The name of the temporary variable created.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:add_tmp arg:self arg:value arguments arg arg Assign Call Call Compare Assign Compare Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_compute_partial_dependence_recursion",
    "source_code": "def _compute_partial_dependence_recursion(self, grid, target_features):\n    if getattr(self, '_fitted_with_sw', False):\n        raise NotImplementedError(\"{} does not support partial dependence plots with the 'recursion' method when sample weights were given during fit time.\".format(self.__class__.__name__))\n    grid = np.asarray(grid, dtype=X_DTYPE, order='C')\n    averaged_predictions = np.zeros((self.n_trees_per_iteration_, grid.shape[0]), dtype=Y_DTYPE)\n    target_features = np.asarray(target_features, dtype=np.intp, order='C')\n    for predictors_of_ith_iteration in self._predictors:\n        for k, predictor in enumerate(predictors_of_ith_iteration):\n            predictor.compute_partial_dependence(grid, target_features, averaged_predictions[k])\n    return averaged_predictions",
    "docstring": "Fast partial dependence computation. Parameters ---------- grid : ndarray, shape (n_samples, n_target_features), dtype=np.float32 The grid points on which the partial dependence should be evaluated. target_features : ndarray, shape (n_target_features), dtype=np.intp The set of target features for which the partial dependence should be evaluated. Returns ------- averaged_predictions : ndarray, shape (n_trees_per_iteration, n_samples) The value of the partial dependence function on each grid point.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:_compute_partial_dependence_recursion arg:self arg:grid arg:target_features arguments arg arg arg If Call Raise Call Call Assign Call Assign Call Assign Call For For Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "compute_local_stride",
    "source_code": "def compute_local_stride(global_stride: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement]) -> tuple[int, ...]:\n    stride_divisors = [1] * len(global_stride)\n    for mesh_idx, p in enumerate(placements):\n        if p.is_shard():\n            i = cast(Shard, p).dim\n            for j in range(len(global_stride)):\n                if global_stride[j] > global_stride[i]:\n                    stride_divisors[j] *= mesh.size(mesh_idx)\n    return tuple((global_stride[i] // stride_divisors[i] for i in range(len(global_stride))))",
    "docstring": "Compute the stride of a local tensor shard, given the global stride of the DTensor. NOTE: Currently this function is assuming the DTensor is evenly shardable.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py",
    "ast_data": "FunctionDef name:compute_local_stride arg:global_stride arg:mesh arg:placements arguments arg arg arg Assign Call For Call If Call Assign Call For Call Call If Compare Call Return return:yes Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, full_result=False, **mapper_options):\n    import routes\n    self.full_result = full_result\n    self.controllers = {}\n    self.mapper = routes.Mapper(**mapper_options)\n    self.mapper.controller_scan = self.controllers.keys",
    "docstring": "Routes dispatcher. Set full_result to True if you wish the controller and the action to be passed on to the page handler parameters. By default they won't be.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpdispatch.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:full_result arguments arg arg arg Assign Assign Assign Call Assign"
  },
  {
    "library": "cherrypy",
    "name": "start",
    "source_code": "def start(self):\n    if self.running:\n        self.bus.log('Already serving on %s' % self.description)\n        return\n    self.interrupt = None\n    if not self.httpserver:\n        raise ValueError('No HTTP server has been created.')\n    if not os.environ.get('LISTEN_PID', None):\n        if isinstance(self.bind_addr, tuple):\n            portend.free(*self.bind_addr, timeout=Timeouts.free)\n    import threading\n    t = threading.Thread(target=self._start_http_thread)\n    t.name = 'HTTPServer ' + t.name\n    t.start()\n    self.wait()\n    self.running = True\n    self.bus.log('Serving on %s' % self.description)",
    "docstring": "Start the HTTP server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:start arg:self arguments arg If Call Return return:no Assign If Raise Call If Call If Call Call Assign Call Assign Call Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "_GetNextLogCountPerToken",
    "source_code": "def _GetNextLogCountPerToken(token):\n    global _log_counter_per_token\n    _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)\n    return _log_counter_per_token[token]",
    "docstring": "Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0)",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:_GetNextLogCountPerToken arg:token arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "OpDtypeSupport",
    "source_code": "class OpDtypeSupport:\n    supported_dtypes: dict[str, OrderedSet[torch.dtype]] = {}\n    convert_outputs: dict[str, bool] = {}\n\n    @classmethod\n    def register_upcast(cls, func: Callable[..., str], convert_output: bool) -> None:\n        op_name = func.__name__\n        cls.supported_dtypes[op_name] = OrderedSet([torch.float32, torch.float64])\n        cls.convert_outputs[op_name] = convert_output",
    "docstring": "Some Triton ops such as libdevice and tl.math only support float32 and float64. This class records which dtypes are supported by specific IR ops.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "ClassDef name:OpDtypeSupport FunctionDef name:register_upcast arg:cls arg:func arg:convert_output arguments arg arg arg Assign Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "reduce_variance",
    "source_code": "@dispatch.dispatch_for_api(math_ops.reduce_variance)\ndef reduce_variance(input_tensor: ragged_tensor.Ragged, axis=None, keepdims=False, name=None):\n    with ops.name_scope(name, 'RaggedReduceVariance', [input_tensor, axis]):\n        input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input_tensor, name='input_tensor')\n        if input_tensor.dtype.is_complex:\n            raise ValueError('reduce_variance is not supported for RaggedTensors with complex dtypes.')\n        square_of_input = math_ops.square(input_tensor)\n        mean_of_square = reduce_mean(square_of_input, axis=axis, keepdims=keepdims)\n        mean = reduce_mean(input_tensor, axis=axis, keepdims=keepdims)\n        square_of_mean = math_ops.square(mean)\n        return math_ops.maximum(mean_of_square - square_of_mean, 0)",
    "docstring": "For docs, see: _RAGGED_REDUCE_DOCSTRING.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_math_ops.py",
    "ast_data": "FunctionDef name:reduce_variance arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg With Call Assign Call If Raise Call Assign Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "real",
    "source_code": "@property\ndef real(self) -> Tensor:\n    return self.w",
    "docstring": "Return the real part with shape :math:. Alias for :func:",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\quaternion.py",
    "ast_data": "FunctionDef name:real arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_tk_widget",
    "source_code": "def get_tk_widget(self):\n    return self._tkcanvas",
    "docstring": "Return the Tk widget used to implement FigureCanvasTkAgg. Although the initial implementation uses a Tk canvas, this routine is intended to hide that fact.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py",
    "ast_data": "FunctionDef name:get_tk_widget arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "display_modulewise_sac_stats",
    "source_code": "def display_modulewise_sac_stats(self, depth: int=2, print_tabular: bool=False) -> None:\n    for mod_fqn, sac_stats in self.sac_mod_stats.items():\n        mod_depth = mod_fqn.count('.') + 1\n        if mod_depth > depth:\n            continue\n        print(f'Module: {mod_fqn}')\n        self.display_sac_stats(sac_stats, print_tabular)\n        print(f'AC Trade-off for Module: {mod_fqn} MSPS = Memory/Runtime')\n        self.display_sac_tradeoff_stats(self.sac_mod_greedy_order_meta[mod_fqn], sac_stats, print_tabular)",
    "docstring": "Displays the SAC and trade-off statistics for each module. Args: depth (int, optional): The maximum depth of modules to display. Defaults to 2. print_tabular (bool, optional): Whether to print the statistics in a tabular format. Defaults to False. Prints: For each module with depth less than or equal to the specified depth: 1. The SAC statistics for the module (using display_sac_stats). 2. The SAC trade-off statistics for the module (using display_sac_tradeoff_stats). If print_tabular is True, the statistics are printed in a tabular format. Otherwise, the statistics are printed in a plain text format.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\_tools\\sac_estimator.py",
    "ast_data": "FunctionDef name:display_modulewise_sac_stats arg:self arg:depth arg:print_tabular arguments arg arg arg For Call Assign Call If Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "unregister_dispatch_for",
    "source_code": "@tf_export('experimental.unregister_dispatch_for')\ndef unregister_dispatch_for(dispatch_target):\n    found = False\n    for api, signatures in _TYPE_BASED_DISPATCH_SIGNATURES.items():\n        if dispatch_target in signatures:\n            dispatcher = getattr(api, TYPE_BASED_DISPATCH_ATTR)\n            dispatcher.Unregister(dispatch_target)\n            del signatures[dispatch_target]\n            found = True\n    elementwise_keys_to_delete = [key for key, handler in _ELEMENTWISE_API_HANDLERS.items() if handler is dispatch_target]\n    for key in set(elementwise_keys_to_delete):\n        for _, target in _ELEMENTWISE_API_TARGETS[key]:\n            unregister_dispatch_for(target)\n        del _ELEMENTWISE_API_HANDLERS[key]\n        del _ELEMENTWISE_API_TARGETS[key]\n        found = True\n    if not found:\n        raise ValueError(f'Function {dispatch_target} was not registered using a `@dispatch_for_*` decorator.')",
    "docstring": "Unregisters a function that was registered with . This is primarily intended for testing purposes. Example: >>> # Define a type and register a dispatcher to override : >>> class MyTensor(tf.experimental.ExtensionType): ... value: tf.Tensor >>> @tf.experimental.dispatch_for_api(tf.abs) ... def my_abs(x: MyTensor): ... return MyTensor(tf.abs(x.value)) >>> tf.abs(MyTensor(5)) MyTensor(value=) >>> # Unregister the dispatcher, so no longer calls . >>> unregister_dispatch_for(my_abs) >>> tf.abs(MyTensor(5)) Traceback (most recent call last): ... ValueError: Attempt to convert a value ... to a Tensor. Args: dispatch_target: The function to unregister. Raises: ValueError: If was not registered using , , or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py",
    "ast_data": "FunctionDef name:unregister_dispatch_for arg:dispatch_target arguments arg Assign For Call If Compare Assign Call Call Assign Assign Call Compare For Call For Call Assign If Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_eager",
    "source_code": "def _is_eager(self):\n    tensors = nest.flatten(self, expand_composites=True)\n    return all((isinstance(t, ops.EagerTensor) for t in tensors))",
    "docstring": "True if all fields are composed of eager tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_is_eager arg:self arguments arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "initial_form_count",
    "source_code": "def initial_form_count(self):\n    if not self.is_bound:\n        return len(self.get_queryset())\n    return super().initial_form_count()",
    "docstring": "Return the number of forms that are required in this FormSet.",
    "type": "method",
    "file_path": "django\\django\\forms\\models.py",
    "ast_data": "FunctionDef name:initial_form_count arg:self arguments arg If Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_initialize_dict",
    "source_code": "def _initialize_dict(self, X, random_state):\n    if self.dict_init is not None:\n        dictionary = self.dict_init\n    else:\n        _, S, dictionary = _randomized_svd(X, self._n_components, random_state=random_state)\n        dictionary = S[:, np.newaxis] * dictionary\n    if self._n_components <= len(dictionary):\n        dictionary = dictionary[:self._n_components, :]\n    else:\n        dictionary = np.concatenate((dictionary, np.zeros((self._n_components - len(dictionary), dictionary.shape[1]), dtype=dictionary.dtype)))\n    dictionary = check_array(dictionary, order='F', dtype=X.dtype, copy=False)\n    dictionary = np.require(dictionary, requirements='W')\n    return dictionary",
    "docstring": "Initialization of the dictionary.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py",
    "ast_data": "FunctionDef name:_initialize_dict arg:self arg:X arg:random_state arguments arg arg arg If Compare Assign Assign Call Assign If Compare Call Assign Assign Call Call Call Assign Call Assign Call Return return:yes"
  },
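A hedged sketch of the SVD branch of this initializer, using the public `sklearn.utils.extmath.randomized_svd` in place of the private `_randomized_svd` the snippet calls (an assumption; the private helper wraps the same computation):

```python
# Sketch of the SVD path of _initialize_dict: right singular vectors scaled
# by the singular values give the initial dictionary atoms.
import numpy as np
from sklearn.utils.extmath import randomized_svd

rng = np.random.default_rng(0)
X = rng.standard_normal((30, 8))

_, S, dictionary = randomized_svd(X, n_components=4, random_state=0)
dictionary = S[:, np.newaxis] * dictionary  # scale each atom, as in the snippet
print(dictionary.shape)  # (4, 8)
```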
  {
    "library": "authlib",
    "name": "create_authorization_url",
    "source_code": "def create_authorization_url(self, redirect_uri=None, **kwargs):\n    metadata = self.load_server_metadata()\n    authorization_endpoint = self.authorize_url or metadata.get('authorization_endpoint')\n    if not authorization_endpoint:\n        raise RuntimeError('Missing \"authorize_url\" value')\n    if self.authorize_params:\n        kwargs.update(self.authorize_params)\n    with self._get_oauth_client(**metadata) as client:\n        if redirect_uri is not None:\n            client.redirect_uri = redirect_uri\n        return self._create_oauth2_authorization_url(client, authorization_endpoint, **kwargs)",
    "docstring": "Generate the authorization url and state for HTTP redirect. :param redirect_uri: Callback or redirect URI for authorization. :param kwargs: Extra parameters to include. :return: dict",
    "type": "method",
    "file_path": "authlib\\authlib\\integrations\\base_client\\sync_app.py",
    "ast_data": "FunctionDef name:create_authorization_url arg:self arg:redirect_uri arguments arg arg arg Assign Call Assign BoolOp Call If Raise Call If Call With Call If Compare Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "check_reshape_kwargs",
    "source_code": "def check_reshape_kwargs(kwargs):\n    order = kwargs.pop('order', 'C')\n    copy = kwargs.pop('copy', False)\n    if kwargs:\n        raise TypeError(f'reshape() got unexpected keywords arguments: {', '.join(kwargs.keys())}')\n    return (order, copy)",
    "docstring": "Unpack keyword arguments for reshape function. This is useful because keyword arguments after star arguments are not allowed in Python 2, but star keyword arguments are. This function unpacks 'order' and 'copy' from the star keyword arguments (with defaults) and throws an error for any remaining.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\_sputils.py",
    "ast_data": "FunctionDef name:check_reshape_kwargs arg:kwargs arguments arg Assign Call Assign Call If Raise Call Call Call Return return:yes"
  },
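To make the calling convention concrete, here is a minimal sketch of how a reshape-like wrapper would use this helper; `my_reshape` is a hypothetical stand-in, not a scipy API:

```python
def check_reshape_kwargs(kwargs):
    # Same logic as above, with the f-string quoting untangled.
    order = kwargs.pop('order', 'C')
    copy = kwargs.pop('copy', False)
    if kwargs:
        raise TypeError(
            f"reshape() got unexpected keyword arguments: {', '.join(kwargs.keys())}")
    return order, copy

def my_reshape(shape, *args, **kwargs):
    order, copy = check_reshape_kwargs(kwargs)
    return shape, order, copy

print(my_reshape((2, 3), order='F'))  # ((2, 3), 'F', False)
try:
    my_reshape((2, 3), bad=1)
except TypeError as e:
    print(e)  # reshape() got unexpected keyword arguments: bad
```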
  {
    "library": "tensorflow",
    "name": "set_size",
    "source_code": "@tf_export('sets.size', v1=['sets.size', 'sets.set_size'])\n@dispatch.add_dispatch_support\ndef set_size(a, validate_indices=True):\n    a = sparse_tensor.convert_to_tensor_or_sparse_tensor(a, name='a')\n    if not isinstance(a, sparse_tensor.SparseTensor):\n        raise TypeError('Expected `SparseTensor`, got %s.' % a)\n    if a.values.dtype.base_dtype not in _VALID_DTYPES:\n        raise TypeError(f'Invalid dtype `{a.values.dtype}` not in supported dtypes: `{_VALID_DTYPES}`.')\n    return gen_set_ops.set_size(a.indices, a.values, a.dense_shape, validate_indices)",
    "docstring": "Compute number of unique elements along last dimension of . Args: a: , with indices sorted in row-major order. validate_indices: Whether to validate the order and range of sparse indices in . Note that setting this to allows for undefined behavior when calling this function with invalid indices. Returns: of set sizes. For ranked , this is a with rank , and the same 1st dimensions as . Each value is the number of unique elements in the corresponding dimension of . Raises: TypeError: If is an invalid types.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\sets_impl.py",
    "ast_data": "FunctionDef name:set_size arg:a arg:validate_indices arguments arg arg Assign Call If Call Raise Call If Compare Raise Call Return return:yes Call Call"
  },
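A small usage sketch, assuming the standard `tf.sets.size` entry point; note `tf.sparse.from_dense` drops zeros and yields row-major-sorted indices, which matches the precondition above:

```python
import tensorflow as tf

dense = tf.constant([[1, 2, 2, 0],
                     [3, 0, 0, 0]])
a = tf.sparse.from_dense(dense)   # zeros dropped, indices sorted row-major
print(tf.sets.size(a).numpy())    # [2 1]: row sets are {1, 2} and {3}
```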
  {
    "library": "scipy",
    "name": "pdf",
    "source_code": "def pdf(self, x, df, scale):\n    return np.exp(self.logpdf(x, df, scale))",
    "docstring": "Inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at Notes ----- %(_doc_callparams_note)s",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:pdf arg:self arg:x arg:df arg:scale arguments arg arg arg arg Return return:yes Call Call"
  },
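Since `pdf` is literally `exp(logpdf(...))`, the two public entry points must agree; a quick check with `scipy.stats.invwishart`:

```python
import numpy as np
from scipy.stats import invwishart

x = np.array([[1.0, 0.2],
              [0.2, 2.0]])   # symmetric positive definite quantile
print(invwishart.pdf(x, df=3, scale=np.eye(2)))
print(np.exp(invwishart.logpdf(x, df=3, scale=np.eye(2))))  # same value
```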
  {
    "library": "tensorflow",
    "name": "push_new_tape",
    "source_code": "def push_new_tape(persistent=False, watch_accessed_variables=True):\n    tape = pywrap_tfe.TFE_Py_TapeSetNew(persistent, watch_accessed_variables)\n    return Tape(tape)",
    "docstring": "Pushes a new tape onto the tape stack.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\tape.py",
    "ast_data": "FunctionDef name:push_new_tape arg:persistent arg:watch_accessed_variables arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "pop_state",
    "source_code": "def pop_state(self) -> None:\n    self._state_stack.pop()",
    "docstring": "Pop a off of the stack.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:pop_state arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_build_submodule",
    "source_code": "def _build_submodule(self, nodes: NodeSet) -> tuple[torch.fx.GraphModule, str]:\n    self._tag_nodes(nodes)\n    split_module = split_by_tags(self.module, ['main_0', 'minimize', 'main_1'])\n    submodule_name: str = ''\n    for child_name, _ in split_module.named_children():\n        if 'minimize' not in child_name:\n            continue\n        if submodule_name == '':\n            submodule_name = child_name\n        else:\n            raise FxNetMinimizerBadModuleError(f'Expected only one minimize submodule with nodes {nodes}')\n    if submodule_name == '':\n        raise FxNetMinimizerBadModuleError(f'Minimize submodule was not found with nodes {nodes}')\n    return (split_module, submodule_name)",
    "docstring": "Split self.module so that one submodule consists of and only . Args: nodes: Nodes that we want to include in the minimize submodule. Returns: split_module (torch.fx.GraphModule): the module after split. submodule_name (str): the name of the submodule that consists of .",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:_build_submodule arg:self arg:nodes arguments arg arg Call Assign Call For Call If Compare If Compare Assign Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "django",
    "name": "__str__",
    "source_code": "def __str__(self):\n    return self.name",
    "docstring": "Return the value of the name property.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py",
    "ast_data": "FunctionDef name:__str__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "read_value",
    "source_code": "def read_value(self):\n    with ops.name_scope('Read'):\n        value = self._read_variable_op()\n    return array_ops.identity(value)",
    "docstring": "Constructs an op which reads the value of this variable. Should be used when there are multiple reads, or when it is desirable to read the value only after some condition is true. Returns: The value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:read_value arg:self arguments arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "variable_captures",
    "source_code": "@property\ndef variable_captures(self):\n    return self.variables",
    "docstring": "Map of python object ids of variables to variables which are captured.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py",
    "ast_data": "FunctionDef name:variable_captures arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "inplace_relu",
    "source_code": "def inplace_relu(X):\n    np.maximum(X, 0, out=X)",
    "docstring": "Compute the rectified linear unit function inplace. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_base.py",
    "ast_data": "FunctionDef name:inplace_relu arg:X arguments arg Call"
  },
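The `out=X` argument is what makes this in-place: `np.maximum` writes the result back into `X`'s own buffer instead of allocating a new array:

```python
import numpy as np

X = np.array([[-1.5, 2.0],
              [0.5, -3.0]])
np.maximum(X, 0, out=X)  # rectify in place, no copy
print(X)                 # [[0.  2. ] [0.5 0. ]]
```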
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self):\n    return {key: value for key, value in self.__dict__.items() if key != 'sparsifier'}",
    "docstring": "Returns the state of the scheduler as a :class:. It contains an entry for every variable in self.__dict__ which is not the sparsifier.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\scheduler\\base_scheduler.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg Return return:yes Call Compare"
  },
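The pattern here is a dict comprehension over `__dict__` that drops the back-reference to the owning object; a minimal standalone sketch (`TinyScheduler` is hypothetical):

```python
class TinyScheduler:
    def __init__(self, sparsifier):
        self.sparsifier = sparsifier  # excluded from the state dict
        self.last_epoch = 0
        self.base_sl = [0.5]

    def state_dict(self):
        return {k: v for k, v in self.__dict__.items() if k != 'sparsifier'}

print(TinyScheduler(sparsifier=object()).state_dict())
# {'last_epoch': 0, 'base_sl': [0.5]}
```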
  {
    "library": "tensorflow",
    "name": "serialize",
    "source_code": "@abc.abstractmethod\ndef serialize(self):\n    pass",
    "docstring": "Callback to serialize the object. Returns a string.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\python_state.py",
    "ast_data": "FunctionDef name:serialize arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "__tf_unflatten__",
    "source_code": "@classmethod\ndef __tf_unflatten__(cls, metadata, components):\n    pass",
    "docstring": "Create a user-defined object from (metadata, components). Args: metadata: a custom Python object that stands for the static config for reconstructing a new object of the current class. components: a that contains the dynamic data fields of the current class, for object reconstruction. Returns: The user-defined object, with the same class of the current object. Implementation Note: - This method should not invoke any TensorFlow ops. - This method only needs to unflatten the current level. If the object has an attribute that also need custom unflattening, nest functions will utilize this method to do recursive unflattening.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\util\\custom_nest_protocol.py",
    "ast_data": "FunctionDef name:__tf_unflatten__ arg:cls arg:metadata arg:components arguments arg arg arg"
  },
  {
    "library": "tensorflow",
    "name": "AbortedError",
    "source_code": "@tf_export('errors.AbortedError')\nclass AbortedError(OpError):\n\n    def __init__(self, node_def, op, message, *args):\n        super(AbortedError, self).__init__(node_def, op, message, ABORTED, *args)",
    "docstring": "Raised when an operation was aborted, typically due to a concurrent action. For example, running a operation may raise if a operation previously ran.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py",
    "ast_data": "ClassDef name:AbortedError FunctionDef name:__init__ arg:self arg:node_def arg:op arg:message arguments arg arg arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "memory_usage_string",
    "source_code": "@property\ndef memory_usage_string(self) -> str:\n    return self.info.memory_usage_string",
    "docstring": "Memory usage string with proper size qualifier.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\info.py",
    "ast_data": "FunctionDef name:memory_usage_string arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SaveSpec",
    "source_code": "class SaveSpec:\n\n    def __init__(self, tensor, slice_spec, name, dtype=None, device=None):\n        self._tensor = tensor\n        self.slice_spec = slice_spec\n        self.name = name\n        if callable(self._tensor):\n            if dtype is None or device is None:\n                raise AssertionError('When passing a callable `tensor` to a SaveSpec, an explicit dtype and device must be provided.')\n            self.dtype = dtype\n            self.device = device\n        else:\n            self.dtype = tensor.dtype\n            if device is not None:\n                self.device = device\n            else:\n                self.device = tensor.device\n\n    @property\n    def tensor(self):\n        return self._tensor() if callable(self._tensor) else self._tensor",
    "docstring": "Class used to describe tensor slices that need to be saved.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object.py",
    "ast_data": "ClassDef name:SaveSpec FunctionDef name:__init__ arg:self arg:tensor arg:slice_spec arg:name arg:dtype arg:device arguments arg arg arg arg arg arg Assign Assign Assign If Call If BoolOp Compare Compare Raise Call Assign Assign Assign If Compare Assign Assign FunctionDef name:tensor arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "SSIM3DLoss",
    "source_code": "class SSIM3DLoss(Module):\n\n    def __init__(self, window_size: int, max_val: float=1.0, eps: float=1e-12, reduction: str='mean', padding: str='same') -> None:\n        super().__init__()\n        self.window_size: int = window_size\n        self.max_val: float = max_val\n        self.eps: float = eps\n        self.reduction: str = reduction\n        self.padding: str = padding\n\n    def forward(self, img1: Tensor, img2: Tensor) -> Tensor:\n        return ssim3d_loss(img1, img2, self.window_size, self.max_val, self.eps, self.reduction, self.padding)",
    "docstring": "Create a criterion that computes a loss based on the SSIM measurement. The loss, or the Structural dissimilarity (DSSIM) is described as: .. math:: \\text{loss}(x, y) = \\frac{1 - \\text{SSIM}(x, y)}{2} See :meth: for details about SSIM. Args: window_size: the size of the gaussian kernel to smooth the images. max_val: the dynamic range of the images. eps: Small value for numerically stability when dividing. reduction : Specifies the reduction to apply to the output: ``. Whether to only use the \"valid\" convolution area to compute SSIM to match the MATLAB implementation of original SSIM paper. Returns: The loss based on the ssim index. Examples: >>> input1 = torch.rand(1, 4, 5, 5, 5) >>> input2 = torch.rand(1, 4, 5, 5, 5) >>> criterion = SSIM3DLoss(5) >>> loss = criterion(input1, input2)",
    "type": "class",
    "file_path": "kornia\\kornia\\losses\\ssim3d.py",
    "ast_data": "ClassDef name:SSIM3DLoss FunctionDef name:__init__ arg:self arg:window_size arg:max_val arg:eps arg:reduction arg:padding arguments arg arg arg arg arg arg Call Call FunctionDef name:forward arg:self arg:img1 arg:img2 arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "sphinx",
    "name": "_break_word",
    "source_code": "def _break_word(self, word: str, space_left: int) -> tuple[str, str]:\n    total = 0\n    for i, c in enumerate(word):\n        total += column_width(c)\n        if total > space_left:\n            return (word[:i - 1], word[i - 1:])\n    return (word, '')",
    "docstring": "Break line by unicode width instead of len(word).",
    "type": "method",
    "file_path": "sphinx\\sphinx\\writers\\text.py",
    "ast_data": "FunctionDef name:_break_word arg:self arg:word arg:space_left arguments arg arg arg Assign For Call Call If Compare Return return:yes Return return:yes"
  },
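A standalone sketch of the same width-aware break, with `unicodedata.east_asian_width` standing in for Sphinx's `column_width` (an approximation; Sphinx's helper handles more categories):

```python
import unicodedata

def column_width(c: str) -> int:
    # Wide/fullwidth characters occupy two terminal columns.
    return 2 if unicodedata.east_asian_width(c) in ('F', 'W') else 1

def break_word(word: str, space_left: int) -> tuple[str, str]:
    total = 0
    for i, c in enumerate(word):
        total += column_width(c)
        if total > space_left:
            return word[:i - 1], word[i - 1:]
    return word, ''

print(break_word('latex', 3))  # ('la', 'tex')
```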
  {
    "library": "pytorch",
    "name": "realize",
    "source_code": "def realize(self) -> 'VariableTracker':\n    return self",
    "docstring": "Used by LazyVariableTracker to build the real VariableTracker",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "FunctionDef name:realize arg:self arguments arg Return return:yes"
  },
  {
    "library": "numpy",
    "name": "LapackSrcNotFoundError",
    "source_code": "class LapackSrcNotFoundError(LapackNotFoundError):\n    pass",
    "docstring": "Lapack ( sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable.",
    "type": "class",
    "file_path": "numpy\\numpy\\distutils\\system_info.py",
    "ast_data": "ClassDef name:LapackSrcNotFoundError"
  },
  {
    "library": "scipy",
    "name": "VertexScalarField",
    "source_code": "class VertexScalarField(VertexBase):\n\n    def __init__(self, x, field=None, nn=None, index=None, field_args=(), g_cons=None, g_cons_args=()):\n        super().__init__(x, nn=nn, index=index)\n        self.check_min = True\n        self.check_max = True\n\n    def connect(self, v):\n        if v is not self and v not in self.nn:\n            self.nn.add(v)\n            v.nn.add(self)\n            self.check_min = True\n            self.check_max = True\n            v.check_min = True\n            v.check_max = True\n\n    def disconnect(self, v):\n        if v in self.nn:\n            self.nn.remove(v)\n            v.nn.remove(self)\n            self.check_min = True\n            self.check_max = True\n            v.check_min = True\n            v.check_max = True\n\n    def minimiser(self):\n        if self.check_min:\n            self._min = all((self.f < v.f for v in self.nn))\n            self.check_min = False\n        return self._min\n\n    def maximiser(self):\n        if self.check_max:\n            self._max = all((self.f > v.f for v in self.nn))\n            self.check_max = False\n        return self._max",
    "docstring": "Add homology properties of a scalar field f: R^n --> R associated with the geometry built from the VertexBase class",
    "type": "class",
    "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py",
    "ast_data": "ClassDef name:VertexScalarField FunctionDef name:__init__ arg:self arg:x arg:field arg:nn arg:index arg:field_args arg:g_cons arg:g_cons_args arguments arg arg arg arg arg arg arg arg Call Call Assign Assign FunctionDef name:connect arg:self arg:v arguments arg arg If BoolOp Compare Compare Call Call Assign Assign Assign Assign FunctionDef name:disconnect arg:self arg:v arguments arg arg If Compare Call Call Assign Assign Assign Assign FunctionDef name:minimiser arg:self arguments arg If Assign Call Compare Assign Return return:yes FunctionDef name:maximiser arg:self arguments arg If Assign Call Compare Assign Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_get_median",
    "source_code": "def _get_median(data, n_zeros):\n    n_elems = len(data) + n_zeros\n    if not n_elems:\n        return np.nan\n    n_negative = np.count_nonzero(data < 0)\n    middle, is_odd = divmod(n_elems, 2)\n    data.sort()\n    if is_odd:\n        return _get_elem_at_rank(middle, data, n_negative, n_zeros)\n    return (_get_elem_at_rank(middle - 1, data, n_negative, n_zeros) + _get_elem_at_rank(middle, data, n_negative, n_zeros)) / 2.0",
    "docstring": "Compute the median of data with n_zeros additional zeros. This function is used to support sparse matrices; it modifies data in-place.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py",
    "ast_data": "FunctionDef name:_get_median arg:data arg:n_zeros arguments arg arg Assign Call If Return return:yes Assign Call Compare Assign Call Call If Return return:yes Call Return return:yes Call Call"
  },
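A worked check: the helper should agree with `numpy.median` over the densified column (note `_get_median` is a private helper, imported here only for illustration, and it sorts `data` in place):

```python
import numpy as np
from sklearn.utils.sparsefuncs import _get_median

data = np.array([-3.0, 5.0, 1.0])   # stored (nonzero) values of a column
n_zeros = 2                         # the column also holds two implicit zeros
dense = np.concatenate([data, np.zeros(n_zeros)])

print(_get_median(data, n_zeros))   # 0.0
print(np.median(dense))             # 0.0
```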
  {
    "library": "django",
    "name": "ArchiveIndexView",
    "source_code": "class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):\n    template_name_suffix = '_archive'",
    "docstring": "Top-level archive of date-based items.",
    "type": "class",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "ClassDef name:ArchiveIndexView Assign"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_compatible_type",
    "source_code": "@deprecation.deprecated(None, 'Use most_specific_common_supertype instead.')\ndef most_specific_compatible_type(self, other: 'TypeSpec') -> 'TypeSpec':\n    result = self.most_specific_common_supertype([other])\n    if result is None:\n        raise ValueError('No TypeSpec is compatible with both %s and %s' % (self, other))\n    return result",
    "docstring": "Returns the most specific TypeSpec compatible with and . Deprecated. Please use instead. Do not override this function. Args: other: A . Raises: ValueError: If there is no TypeSpec that is compatible with both and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py",
    "ast_data": "FunctionDef name:most_specific_compatible_type arg:self arg:other arguments arg arg Assign Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "modified_dogleg",
    "source_code": "def modified_dogleg(A, Y, b, trust_radius, lb, ub):\n    newton_point = -Y.dot(b)\n    if inside_box_boundaries(newton_point, lb, ub) and norm(newton_point) <= trust_radius:\n        x = newton_point\n        return x\n    g = A.T.dot(b)\n    A_g = A.dot(g)\n    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g\n    origin_point = np.zeros_like(cauchy_point)\n    z = cauchy_point\n    p = newton_point - cauchy_point\n    _, alpha, intersect = box_sphere_intersections(z, p, lb, ub, trust_radius)\n    if intersect:\n        x1 = z + alpha * p\n    else:\n        z = origin_point\n        p = cauchy_point\n        _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius)\n        x1 = z + alpha * p\n    z = origin_point\n    p = newton_point\n    _, alpha, _ = box_sphere_intersections(z, p, lb, ub, trust_radius)\n    x2 = z + alpha * p\n    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):\n        return x1\n    else:\n        return x2",
    "docstring": "Approximately minimize ``, the upper bound for the ith component is just ignored. Returns ------- x : array_like, shape (n,) Solution to the problem. Notes ----- Based on implementations described in pp. 885-886 from [1]_. References ---------- .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal. \"An interior point algorithm for large-scale nonlinear programming.\" SIAM Journal on Optimization 9.4 (1999): 877-900.",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py",
    "ast_data": "FunctionDef name:modified_dogleg arg:A arg:Y arg:b arg:trust_radius arg:lb arg:ub arguments arg arg arg arg arg arg Assign Call If BoolOp Call Compare Call Assign Return return:yes Assign Call Assign Call Assign Call Call Assign Call Assign Assign Assign Call If Assign Assign Assign Assign Call Assign Assign Assign Assign Call Assign If Compare Call Call Call Call Return return:yes Return return:yes"
  },
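The Cauchy point used above can be checked in isolation: it is the minimizer of ``||A x + b||**2`` along the steepest-descent direction, so its residual can never exceed the residual at the origin:

```python
import numpy as np

A = np.array([[1.0, 2.0, 0.0],
              [0.0, 1.0, 1.0]])
b = np.array([1.0, -1.0])

g = A.T @ b                          # gradient of 1/2*||A x + b||^2 at x = 0
A_g = A @ g
cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
print(np.linalg.norm(A @ cauchy_point + b) <= np.linalg.norm(b))  # True
```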
  {
    "library": "scikit-learn",
    "name": "RandomForestClassifierBenchmark",
    "source_code": "class RandomForestClassifierBenchmark(Predictor, Estimator, Benchmark):\n    param_names = ['representation', 'n_jobs']\n    params = (['dense', 'sparse'], Benchmark.n_jobs_vals)\n\n    def setup_cache(self):\n        super().setup_cache()\n\n    def make_data(self, params):\n        representation, n_jobs = params\n        if representation == 'sparse':\n            data = _20newsgroups_highdim_dataset()\n        else:\n            data = _20newsgroups_lowdim_dataset()\n        return data\n\n    def make_estimator(self, params):\n        representation, n_jobs = params\n        n_estimators = 500 if Benchmark.data_size == 'large' else 100\n        estimator = RandomForestClassifier(n_estimators=n_estimators, min_samples_split=10, max_features='log2', n_jobs=n_jobs, random_state=0)\n        return estimator\n\n    def make_scorers(self):\n        make_gen_classif_scorers(self)",
    "docstring": "Benchmarks for RandomForestClassifier.",
    "type": "class",
    "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\ensemble.py",
    "ast_data": "ClassDef name:RandomForestClassifierBenchmark Assign Assign FunctionDef name:setup_cache arg:self arguments arg Call Call FunctionDef name:make_data arg:self arg:params arguments arg arg Assign If Compare Assign Call Assign Call Return return:yes FunctionDef name:make_estimator arg:self arg:params arguments arg arg Assign Assign Compare Assign Call Return return:yes FunctionDef name:make_scorers arg:self arguments arg Call"
  },
  {
    "library": "tensorflow",
    "name": "assign_sub",
    "source_code": "def assign_sub(self, delta, use_locking=None, name=None, read_value=True):\n    with _handle_graph(self.handle), self._assign_dependencies():\n        assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)\n    if read_value:\n        return self._lazy_read(assign_sub_op)\n    return assign_sub_op",
    "docstring": "Subtracts a value from this variable. Args: delta: A . The value to subtract from this variable. use_locking: If , use locking during the operation. name: The name to use for the operation. read_value: A . Whether to read and return the new value of the variable or not. Returns: If is , this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the that does the assignment, and when in eager mode it will return .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:assign_sub arg:self arg:delta arg:use_locking arg:name arg:read_value arguments arg arg arg arg arg With Call Call Assign Call Call If Return return:yes Call Return return:yes"
  },
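A small eager-mode usage sketch of the documented `read_value` behavior:

```python
import tensorflow as tf

v = tf.Variable(3.0)
print(v.assign_sub(1.0).numpy())        # 2.0 (read_value=True by default)
result = v.assign_sub(0.5, read_value=False)
print(result, v.numpy())                # None 1.5 (eager mode returns None)
```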
  {
    "library": "tensorflow",
    "name": "attached_dependencies",
    "source_code": "@property\ndef attached_dependencies(self):\n    return self._attached_dependencies",
    "docstring": "Returns list of dependencies that should be saved in the checkpoint. These dependencies are not tracked by root, but are in the checkpoint. This is defined when the user creates a Checkpoint with both root and kwargs set. Returns: A list of TrackableReferences.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py",
    "ast_data": "FunctionDef name:attached_dependencies arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "main",
    "source_code": "def main(_):\n    code.interact()\n    return 0",
    "docstring": "Run an interactive console.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\simple_console_for_windows.py",
    "ast_data": "FunctionDef name:main arg:_ arguments arg Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_alt_transform",
    "source_code": "def get_alt_transform(self):\n    if self._user_transform is None:\n        return self._alt_transform.frozen()\n    else:\n        return (self._alt_transform + self._user_transform).frozen()",
    "docstring": "Return the transform to be applied to the from .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\markers.py",
    "ast_data": "FunctionDef name:get_alt_transform arg:self arguments arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_lda.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "from_dim_sizes",
    "source_code": "@staticmethod\ndef from_dim_sizes(dim_sizes):\n    with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', [dim_sizes]):\n        dim_sizes = tuple((ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, name='dim_sizes') for size in dim_sizes))\n        inner_split = 0\n        for dim, dim_size in enumerate(dim_sizes):\n            if dim_size.shape.ndims == 1:\n                inner_split = dim + 1\n            elif dim_size.shape.ndims != 0:\n                raise ValueError('Each dim_size must be a scalar or a vector')\n        return RaggedTensorDynamicShape(dim_sizes[:inner_split], dim_sizes[inner_split:])",
    "docstring": "Constructs a ragged shape from a list of dimension sizes. This list contains a single tensor for each dimension, where the tensor is a scalar if the dimension is uniform, or a vector if the dimension is ragged. Args: dim_sizes: List of int32 or int64 scalars or vectors. Returns: A RaggedTensorDynamicShape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py",
    "ast_data": "FunctionDef name:from_dim_sizes arg:dim_sizes arguments arg With Call Assign Call Call Assign For Call If Compare Assign If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "transform_object",
    "source_code": "def transform_object(self, write_item: WriteItem, object: Any):\n    if write_item.type == WriteItemType.BYTE_IO:\n        bytes = io.BytesIO()\n        torch.save(object, bytes)\n        object = bytes\n    return object",
    "docstring": "Extension from the planner interface to make it easy to extend the default planner.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py",
    "ast_data": "FunctionDef name:transform_object arg:self arg:write_item arg:object arguments arg arg arg If Compare Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "logdet",
    "source_code": "@tf_export('linalg.logdet')\n@dispatch.add_dispatch_support\ndef logdet(matrix, name=None):\n    with ops.name_scope(name, 'logdet', [matrix]):\n        chol = gen_linalg_ops.cholesky(matrix)\n        return 2.0 * math_ops.reduce_sum(math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))), axis=[-1])",
    "docstring": "Computes log of the determinant of a hermitian positive definite matrix. Args: matrix: A . Must be , , , , or with shape . name: A name to give this . Defaults to . Returns: The natural log of the determinant of . @compatibility(numpy) Equivalent to numpy.linalg.slogdet, although no sign is returned since only hermitian positive definite matrices are supported. @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linalg_impl.py",
    "ast_data": "FunctionDef name:logdet arg:matrix arg:name arguments arg arg With Call Assign Call Return return:yes Call Call Call Call Call"
  },
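The identity behind the implementation: for an SPD matrix A with Cholesky factor L, log det(A) = 2 * sum(log(diag(L))). A NumPy cross-check against `slogdet`:

```python
import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((4, 4))
A = M @ M.T + 4 * np.eye(4)                  # symmetric positive definite

L = np.linalg.cholesky(A)
via_chol = 2.0 * np.sum(np.log(np.diag(L)))
_, logabsdet = np.linalg.slogdet(A)
print(np.allclose(via_chol, logabsdet))      # True; sign is +1 for SPD
```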
  {
    "library": "pytorch",
    "name": "RendezvousGracefulExitError",
    "source_code": "class RendezvousGracefulExitError(RendezvousError):\n    pass",
    "docstring": "Raised when node wasn't not included in rendezvous and gracefully exits. Exception is a mechanism to exit the stack, however does not mean a failure.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py",
    "ast_data": "ClassDef name:RendezvousGracefulExitError"
  },
  {
    "library": "pytorch",
    "name": "_check_is_valid_config_dict",
    "source_code": "def _check_is_valid_config_dict(config_dict: Any, allowed_keys: set[str], dict_name: str) -> None:\n    for k in config_dict.keys():\n        if k not in allowed_keys:\n            raise ValueError('Expected ' + dict_name + ' to have the following keys: ' + str(allowed_keys) + \". But found '\" + k + \"' instead.\")",
    "docstring": "Checks if the given config_dict has the correct keys Args: : dictionary whose keys we want to check",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\qconfig_mapping_utils.py",
    "ast_data": "FunctionDef name:_check_is_valid_config_dict arg:config_dict arg:allowed_keys arg:dict_name arguments arg arg arg For Call If Compare Raise Call Call"
  },
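The behavior in miniature, re-implemented locally (`check_keys` is a stand-in name, not the PyTorch helper):

```python
def check_keys(config_dict, allowed_keys, dict_name):
    for k in config_dict:
        if k not in allowed_keys:
            raise ValueError(
                f"Expected {dict_name} to have the following keys: "
                f"{allowed_keys}. But found '{k}' instead.")

allowed = {'object_type', 'module_name'}
check_keys({'object_type': 1}, allowed, 'qconfig_dict')   # passes silently
try:
    check_keys({'oops': 1}, allowed, 'qconfig_dict')
except ValueError as e:
    print(e)
```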
  {
    "library": "virtualenv",
    "name": "ProcessCallFailedError",
    "source_code": "class ProcessCallFailedError(RuntimeError):\n\n    def __init__(self, code, out, err, cmd) -> None:\n        super().__init__(code, out, err, cmd)\n        self.code = code\n        self.out = out\n        self.err = err\n        self.cmd = cmd",
    "docstring": "Failed a process call.",
    "type": "class",
    "file_path": "virtualenv\\src\\virtualenv\\util\\error.py",
    "ast_data": "ClassDef name:ProcessCallFailedError FunctionDef name:__init__ arg:self arg:code arg:out arg:err arg:cmd arguments arg arg arg arg arg Call Call Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, features):\n    self._features = features.copy()\n    self._feature_tensors = {}",
    "docstring": "Creates a . Args: features: A mapping from feature column to objects that are or , or can be converted to same via . A key signifies a base feature (not-transformed). A key means that this is the output of an existing which can be reused.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:features arguments arg arg Assign Call Assign"
  },
  {
    "library": "pytorch",
    "name": "module_outputs",
    "source_code": "def module_outputs(self) -> Sequence[torch.fx.Node]:\n    nodes = list(self.fx_nodes())\n    assert len(nodes) > 0, 'Cannot extract module inputs from empty nodes.'\n    module_outputs: dict[torch.fx.Node, None] = {}\n    node_set: set[torch.fx.Node] = set(nodes)\n    for node in nodes:\n        if any((user not in node_set for user in node.users)):\n            module_outputs[node] = None\n    return list(module_outputs.keys())",
    "docstring": "Extract module outputs from the sequence of fx nodes this instance holds. All nodes that are used by nodes outside of the module are considered module outputs. The order of returned module outputs is the same as the their creation order. ### Known limitations The original ordering of module outputs is not preserved. There is no meta information to be found from the that can be used to recover the original ordering. Returns: Sequence of module outputs.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py",
    "ast_data": "FunctionDef name:module_outputs arg:self arguments arg Assign Call Call Compare Call Call For If Call Compare Assign Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "get_zlim",
    "source_code": "def get_zlim(self):\n    return tuple(self.zz_viewLim.intervalx)",
    "docstring": "Return the 3D z-axis view limits. Returns ------- left, right : (float, float) The current z-axis limits in data coordinates. See Also -------- set_zlim set_zbound, get_zbound invert_zaxis, zaxis_inverted Notes ----- The z-axis may be inverted, in which case the *left* value will be greater than the *right* value.",
    "type": "method",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py",
    "ast_data": "FunctionDef name:get_zlim arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "quantize",
    "source_code": "@tf_export('quantization.quantize', v1=['quantization.quantize', 'quantize'])\n@dispatch.add_dispatch_support\n@deprecation.deprecated_endpoints('quantize')\ndef quantize(input, min_range, max_range, T, mode='MIN_COMBINED', round_mode='HALF_AWAY_FROM_ZERO', name=None, narrow_range=False, axis=None, ensure_minimum_range=0.01):\n    if ensure_minimum_range != 0.01:\n        return quantize_v2(input, min_range, max_range, T, mode=mode, round_mode=round_mode, name=name, narrow_range=narrow_range, axis=axis, ensure_minimum_range=ensure_minimum_range)\n    return quantize_v2(input, min_range, max_range, T, mode=mode, round_mode=round_mode, name=name, narrow_range=narrow_range, axis=axis)",
    "docstring": "Quantize the input tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:quantize arg:input arg:min_range arg:max_range arg:T arg:mode arg:round_mode arg:name arg:narrow_range arg:axis arg:ensure_minimum_range arguments arg arg arg arg arg arg arg arg arg arg If Compare Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_is_non_interactive_terminal_ipython",
    "source_code": "def _is_non_interactive_terminal_ipython(ip):\n    return hasattr(ip, 'parent') and ip.parent is not None and (getattr(ip.parent, 'interact', None) is False)",
    "docstring": "Return whether we are in a terminal IPython, but non interactive. When in _terminal_ IPython, ip.parent will have and attribute, if this attribute is False we do not setup eventloop integration as the user will _not_ interact with IPython. In all other case (ZMQKernel, or is interactive), we do.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_is_non_interactive_terminal_ipython arg:ip arguments arg Return return:yes BoolOp Call Compare Compare Call"
  },
  {
    "library": "tensorflow",
    "name": "_slice_length",
    "source_code": "def _slice_length(value_length, slice_key):\n    zeros = array_ops.zeros(value_length, dtype=dtypes.bool)\n    return array_ops.size(zeros[slice_key], out_type=value_length.dtype)",
    "docstring": "Computes the number of elements in a slice of a value with a given length. Returns the equivalent of: Args: value_length: Scalar int : the length of the value being sliced. slice_key: A object used to slice elements from the value. Returns: The number of elements in the sliced value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_getitem.py",
    "ast_data": "FunctionDef name:_slice_length arg:value_length arg:slice_key arguments arg arg Assign Call Return return:yes Call"
  },
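The documented equivalence holds in plain Python, which is the easiest way to see what the boolean-mask trick computes:

```python
value_length = 10
for slice_key in (slice(2, 7), slice(None, None, 3), slice(8, 1, -2)):
    n = len(range(value_length)[slice_key])
    print(slice_key, '->', n)   # 5, 4, and 4 elements respectively
```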
  {
    "library": "scipy",
    "name": "_process_quantiles",
    "source_code": "def _process_quantiles(self, x, n, p):\n    xx = np.asarray(x, dtype=int)\n    if xx.ndim == 0:\n        raise ValueError('x must be an array.')\n    if xx.size != 0 and (not xx.shape[-1] == p.shape[-1]):\n        raise ValueError(f'Size of each quantile should be size of p: received {xx.shape[-1]}, but expected {p.shape[-1]}.')\n    cond = np.any(xx != x, axis=-1)\n    cond |= np.any(xx < 0, axis=-1)\n    cond = cond | (np.sum(xx, axis=-1) != n)\n    return (xx, cond)",
    "docstring": "Returns: x_, xcond. x_ is an int array; xcond is a boolean array flagging values out of the domain.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:_process_quantiles arg:self arg:x arg:n arg:p arguments arg arg arg arg Assign Call If Compare Raise Call If BoolOp Compare Compare Raise Call Assign Call Compare Call Compare Assign Compare Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "tmax",
    "source_code": "@xp_capabilities()\n@_axis_nan_policy_factory(lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,))\ndef tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):\n    xp = array_namespace(a)\n    min_ = xp.iinfo(a.dtype).min if xp.isdtype(a.dtype, 'integral') else -xp.inf\n    a, mask = _put_val_to_limits(a, (None, upperlimit), (None, inclusive), val=min_, xp=xp)\n    res = xp.max(a, axis=axis)\n    invalid = xp.all(mask, axis=axis)\n    if is_lazy_array(invalid) or xp.any(invalid):\n        res = xp_promote(res, force_floating=True, xp=xp)\n        res = xp.where(invalid, xp.nan, res)\n    return res[()] if res.ndim == 0 else res",
    "docstring": "Compute the trimmed maximum. This function computes the maximum value of an array along a given axis, while ignoring values larger than a specified upper limit. Parameters ---------- a : array_like Array of values. upperlimit : None or float, optional Values in the input array greater than the given limit will be ignored. When upperlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . inclusive : {True, False}, optional This flag determines whether values exactly equal to the upper limit are included. The default value is True. Returns ------- tmax : float, int or ndarray Trimmed maximum. Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmax(x) 19 >>> stats.tmax(x, 13) 13 >>> stats.tmax(x, 13, inclusive=False) 12",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_stats_py.py",
    "ast_data": "FunctionDef name:tmax arg:a arg:upperlimit arg:axis arg:inclusive arg:nan_policy arguments arg arg arg arg arg Assign Call Assign Call Call Assign Call Assign Call Assign Call If BoolOp Call Call Assign Call Assign Call Return return:yes Compare Call Call arguments arg arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_fit_context",
    "source_code": "def _fit_context(*, prefer_skip_nested_validation):\n\n    def decorator(fit_method):\n\n        @functools.wraps(fit_method)\n        def wrapper(estimator, *args, **kwargs):\n            global_skip_validation = get_config()['skip_parameter_validation']\n            partial_fit_and_fitted = fit_method.__name__ == 'partial_fit' and _is_fitted(estimator)\n            if not global_skip_validation and (not partial_fit_and_fitted):\n                estimator._validate_params()\n            with config_context(skip_parameter_validation=prefer_skip_nested_validation or global_skip_validation):\n                return fit_method(estimator, *args, **kwargs)\n        return wrapper\n    return decorator",
    "docstring": "Decorator to run the fit methods of estimators within context managers. Parameters ---------- prefer_skip_nested_validation : bool If True, the validation of parameters of inner estimators or functions called during fit will be skipped. This is useful to avoid validating many times the parameters passed by the user from the public facing API. It's also useful to avoid validating parameters that we pass internally to inner functions that are guaranteed to be valid by the test suite. It should be set to True for most estimators, except for those that receive non-validated objects as parameters, such as meta-estimators that are given estimator objects. Returns ------- decorated_fit : method The decorated fit method.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\base.py",
    "ast_data": "FunctionDef name:_fit_context arguments arg FunctionDef name:decorator arg:fit_method arguments arg FunctionDef name:wrapper arg:estimator arguments arg arg arg Assign Call Assign BoolOp Compare Call If BoolOp Call With Call BoolOp Return return:yes Call Call Return return:yes Return return:yes"
  },
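The shape of this decorator factory, reduced to its skeleton (the `config_context` here is a stand-in for sklearn's, and the validation checks are omitted):

```python
import functools
from contextlib import contextmanager

@contextmanager
def config_context(skip_parameter_validation):
    yield  # stand-in: the real one toggles global sklearn config

def fit_context(*, prefer_skip_nested_validation):
    def decorator(fit_method):
        @functools.wraps(fit_method)
        def wrapper(estimator, *args, **kwargs):
            # Real version also runs estimator._validate_params() here.
            with config_context(
                    skip_parameter_validation=prefer_skip_nested_validation):
                return fit_method(estimator, *args, **kwargs)
        return wrapper
    return decorator

class Model:
    @fit_context(prefer_skip_nested_validation=True)
    def fit(self, X):
        return self

print(Model().fit([[1.0]]))  # <__main__.Model object ...>
```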
  {
    "library": "scikit-learn",
    "name": "all_displays",
    "source_code": "def all_displays():\n    from ._testing import ignore_warnings\n    all_classes = []\n    root = str(Path(__file__).parent.parent)\n    with ignore_warnings(category=FutureWarning):\n        for _, module_name, _ in pkgutil.walk_packages(path=[root], prefix='sklearn.'):\n            module_parts = module_name.split('.')\n            if any((part in _MODULE_TO_IGNORE for part in module_parts)) or '._' in module_name:\n                continue\n            module = import_module(module_name)\n            classes = inspect.getmembers(module, inspect.isclass)\n            classes = [(name, display_class) for name, display_class in classes if not name.startswith('_') and name.endswith('Display')]\n            all_classes.extend(classes)\n    return sorted(set(all_classes), key=itemgetter(0))",
    "docstring": "Get a list of all displays from . Returns ------- displays : list of tuples List of (name, class), where `` is the actual type of the class. Examples -------- >>> from sklearn.utils.discovery import all_displays >>> displays = all_displays() >>> displays[0] ('CalibrationDisplay', )",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\discovery.py",
    "ast_data": "FunctionDef name:all_displays arguments Assign Assign Call Call With Call For Call Assign Call If BoolOp Call Compare Compare Assign Call Assign Call Assign BoolOp Call Call Call Return return:yes Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "verify_installed_reactor",
    "source_code": "def verify_installed_reactor(reactor_path: str) -> None:\n    from twisted.internet import reactor\n    reactor_class = load_object(reactor_path)\n    if not reactor.__class__ == reactor_class:\n        raise RuntimeError(f'The installed reactor ({reactor.__module__}.{reactor.__class__.__name__}) does not match the requested one ({reactor_path})')",
    "docstring": "Raises :exc: if the installed :mod: does not match the specified import path.",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\reactor.py",
    "ast_data": "FunctionDef name:verify_installed_reactor arg:reactor_path arguments arg Assign Call If Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "signbit",
    "source_code": "@staticmethod\ndef signbit(x):\n    return f'std::signbit(static_cast<float>({x}))' if _IS_WINDOWS else f'std::signbit({x})'",
    "docstring": "On windows std::signbit only support float type. Ref:",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py",
    "ast_data": "FunctionDef name:signbit arg:x arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    if self.n_classes_ > 2 and self.multi_class == 'one_vs_one':\n        raise ValueError('one_vs_one multi-class mode does not support predicting probability estimates. Use one_vs_rest mode instead.')\n    if self.kernel is None or self.kernel.requires_vector_input:\n        X = validate_data(self, X, ensure_2d=True, dtype='numeric', reset=False)\n    else:\n        X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)\n    return self.base_estimator_.predict_proba(X)",
    "docstring": "Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\gaussian_process\\_gpc.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call If BoolOp Compare Compare Raise Call If BoolOp Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_margins",
    "source_code": "def get_margins(self, todo, col):\n    return self.margin_vals[todo][col]",
    "docstring": "Return the margin at this position",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py",
    "ast_data": "FunctionDef name:get_margins arg:self arg:todo arg:col arguments arg arg arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, queries: Tensor, keys: Tensor, values: Tensor, q_mask: Optional[Tensor]=None, kv_mask: Optional[Tensor]=None) -> Tensor:\n    Q = self.feature_map(queries)\n    K = self.feature_map(keys)\n    if q_mask is not None:\n        Q = Q * q_mask[:, :, None, None]\n    if kv_mask is not None:\n        K = K * kv_mask[:, :, None, None]\n        values = values * kv_mask[:, :, None, None]\n    v_length = values.size(1)\n    values = values / v_length\n    KV = torch.einsum('nshd,nshv->nhdv', K, values)\n    Z = 1 / (torch.einsum('nlhd,nhd->nlh', Q, K.sum(dim=1)) + self.eps)\n    queried_values = torch.einsum('nlhd,nhdv,nlh->nlhv', Q, KV, Z) * v_length\n    return queried_values.contiguous()",
    "docstring": "Multi-Head linear attention proposed in \"Transformers are RNNs\". Args: queries: [N, L, H, D] keys: [N, S, H, D] values: [N, S, H, D] q_mask: [N, L] kv_mask: [N, S] Returns: queried_values: (N, L, H, D)",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\loftr\\loftr_module\\linear_attention.py",
    "ast_data": "FunctionDef name:forward arg:self arg:queries arg:keys arg:values arg:q_mask arg:kv_mask arguments arg arg arg arg arg arg Assign Call Assign Call If Compare Assign If Compare Assign Assign Assign Call Assign Assign Call Assign Call Call Assign Call Return return:yes Call"
  },
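A self-contained check of the einsum pipeline above, using the `elu(x) + 1` feature map (an assumption here; it is the common choice for this family of linear attention). The key point is that cost scales with `D**2` per query instead of with the key length `S`:

```python
import torch
import torch.nn.functional as F

N, L, S, H, D = 2, 6, 7, 4, 8
eps = 1e-6
queries, keys, values = (torch.randn(N, x, H, D) for x in (L, S, S))

Q = F.elu(queries) + 1                           # positive feature map
K = F.elu(keys) + 1
v_length = values.size(1)
values = values / v_length                       # numerical stability
KV = torch.einsum('nshd,nshv->nhdv', K, values)  # [N, H, D, D] summary
Z = 1 / (torch.einsum('nlhd,nhd->nlh', Q, K.sum(dim=1)) + eps)
out = torch.einsum('nlhd,nhdv,nlh->nlhv', Q, KV, Z) * v_length
print(out.shape)                                 # torch.Size([2, 6, 4, 8])
```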
  {
    "library": "tensorflow",
    "name": "_cudnn_rnn_backwardv3",
    "source_code": "@ops.RegisterGradient('CudnnRNNV3')\ndef _cudnn_rnn_backwardv3(op: ops.Operation, *grads):\n    if not op.get_attr('is_training'):\n        raise ValueError('To use CudnnRNNV3 in gradients, is_training must be set to True.')\n    return gen_cudnn_rnn_ops.cudnn_rnn_backprop_v3(input=op.inputs[0], input_h=op.inputs[1], input_c=op.inputs[2], params=op.inputs[3], sequence_lengths=op.inputs[4], output=op.outputs[0], output_h=op.outputs[1], output_c=op.outputs[2], output_backprop=grads[0], output_h_backprop=grads[1], output_c_backprop=grads[2], reserve_space=op.outputs[3], host_reserved=op.outputs[4], dropout=op.get_attr('dropout'), seed=op.get_attr('seed'), seed2=op.get_attr('seed2'), time_major=op.get_attr('time_major'), num_proj=op.get_attr('num_proj'), rnn_mode=op.get_attr('rnn_mode'), input_mode=op.get_attr('input_mode'), direction=op.get_attr('direction')) + (None,)",
    "docstring": "Gradients for the CudnnRNNV3 op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cudnn_rnn_grad.py",
    "ast_data": "FunctionDef name:_cudnn_rnn_backwardv3 arg:op arguments arg arg If Call Raise Call Return return:yes Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "__repr__",
    "source_code": "def __repr__(self) -> str:\n    attrs_list = (f'{attr_name}={getattr(self, attr_name)}' for attr_name in self._attributes if getattr(self, attr_name, None) is not None and attr_name[0] != '_')\n    attrs = ','.join(attrs_list)\n    return f'{type(self).__name__} [{attrs}]'",
    "docstring": "Provide a nice str repr of our rolling object.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\window\\rolling.py",
    "ast_data": "FunctionDef name:__repr__ arg:self arguments arg Assign Call BoolOp Compare Call Compare Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "sequence_reset_by_name_sql",
    "source_code": "def sequence_reset_by_name_sql(self, style, sequences):\n    return []",
    "docstring": "Return a list of the SQL statements required to reset sequences passed in . The argument is a Style object as returned by either color_style() or no_style() in django.core.management.color.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\operations.py",
    "ast_data": "FunctionDef name:sequence_reset_by_name_sql arg:self arg:style arg:sequences arguments arg arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "join",
    "source_code": "def join(self) -> None:\n    self._server.join()",
    "docstring": "Blocks until the server has shut down. This is useful when starting a dedicated worker process. This method currently blocks forever. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while joining the server.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\service\\server_lib.py",
    "ast_data": "FunctionDef name:join arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "_AllGatherRotater",
    "source_code": "class _AllGatherRotater(_RingRotater):\n\n    def __init__(self, pg: dist.ProcessGroup, seq_dim: int) -> None:\n        self._pg = pg\n        self._seq_dim = seq_dim\n        self._aggregated_buffer: Optional[torch.Tensor] = None\n        self._idx = 0\n\n    def exchange_buffers(self, curr_buffer: torch.Tensor) -> None:\n        self._idx += 1\n        if self._aggregated_buffer is None:\n            self._aggregated_buffer = ft_c.all_gather_tensor(curr_buffer.contiguous(), gather_dim=0, group=self._pg)\n\n    def next_buffer(self) -> torch.Tensor:\n        rank = dist.get_rank(self._pg)\n        idx = rank - self._idx\n        assert self._aggregated_buffer is not None\n        self._aggregated_buffer = _maybe_wait(self._aggregated_buffer)\n        return self._aggregated_buffer.chunk(dist.get_world_size(self._pg))[idx]",
    "docstring": "Allgather the kv and return the only the requried kv. Only one communication will be done.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\tensor\\experimental\\_attention.py",
    "ast_data": "ClassDef name:_AllGatherRotater FunctionDef name:__init__ arg:self arg:pg arg:seq_dim arguments arg arg arg Assign Assign Assign FunctionDef name:exchange_buffers arg:self arg:curr_buffer arguments arg arg If Compare Assign Call Call FunctionDef name:next_buffer arg:self arguments arg Assign Call Assign Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "EfficientViTConfig",
    "source_code": "@dataclass\nclass EfficientViTConfig:\n    checkpoint: str = field(default_factory=_get_base_url)\n\n    @classmethod\n    def from_pretrained(cls, model_type: Literal['b1', 'b2', 'b3'], resolution: Literal[224, 256, 288]) -> EfficientViTConfig:\n        return cls(checkpoint=_get_base_url(model_type=model_type, resolution=resolution))",
    "docstring": "Configuration to construct EfficientViT model. Model weights can be loaded from a checkpoint URL or local path. The model weights are hosted on HuggingFace's model hub: Args: checkpoint: URL or local path of model weights.",
    "type": "class",
    "file_path": "kornia\\kornia\\contrib\\models\\efficient_vit\\model.py",
    "ast_data": "ClassDef name:EfficientViTConfig Call FunctionDef name:from_pretrained arg:cls arg:model_type arg:resolution arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "kornia",
    "name": "to_numpy",
    "source_code": "def to_numpy(self) -> np_ndarray:\n    return self.data.cpu().detach().numpy()",
    "docstring": "Return a numpy array in cpu from the image tensor.",
    "type": "method",
    "file_path": "kornia\\kornia\\image\\image.py",
    "ast_data": "FunctionDef name:to_numpy arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_CoordinatedSessionCreator",
    "source_code": "class _CoordinatedSessionCreator(SessionCreator):\n\n    def __init__(self, session_creator, hooks, stop_grace_period_secs):\n        self._session_creator = session_creator\n        self._hooks = hooks\n        self.coord = None\n        self.tf_sess = None\n        self._stop_grace_period_secs = stop_grace_period_secs\n\n    def create_session(self):\n        self.tf_sess = self._session_creator.create_session()\n        self.coord = coordinator.Coordinator(clean_stop_exception_types=[])\n        if ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):\n            queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)\n        for hook in self._hooks:\n            hook.after_create_session(self.tf_sess, self.coord)\n        return _CoordinatedSession(_HookedSession(self.tf_sess, self._hooks), self.coord, self._stop_grace_period_secs)",
    "docstring": "Factory for _CoordinatedSession.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py",
    "ast_data": "ClassDef name:_CoordinatedSessionCreator FunctionDef name:__init__ arg:self arg:session_creator arg:hooks arg:stop_grace_period_secs arguments arg arg arg arg Assign Assign Assign Assign Assign FunctionDef name:create_session arg:self arguments arg Assign Call Assign Call If Call Call For Call Return return:yes Call Call"
  },
  {
    "library": "numpy",
    "name": "_vander_nd_flat",
    "source_code": "def _vander_nd_flat(vander_fs, points, degrees):\n    v = _vander_nd(vander_fs, points, degrees)\n    return v.reshape(v.shape[:-len(degrees)] + (-1,))",
    "docstring": "Like , but flattens the last `` functions.",
    "type": "function",
    "file_path": "numpy\\numpy\\polynomial\\polyutils.py",
    "ast_data": "FunctionDef name:_vander_nd_flat arg:vander_fs arg:points arg:degrees arguments arg arg arg Assign Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "set_patchB",
    "source_code": "def set_patchB(self, patchB):\n    self.patchB = patchB\n    self.stale = True",
    "docstring": "Set the head patch. Parameters ---------- patchB :",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_patchB arg:self arg:patchB arguments arg arg Assign Assign"
  },
  {
    "library": "scrapy",
    "name": "normvalue",
    "source_code": "def normvalue(self, value: Any) -> Any:\n    return value",
    "docstring": "Method to normalize values prior to be set",
    "type": "method",
    "file_path": "scrapy\\scrapy\\utils\\datatypes.py",
    "ast_data": "FunctionDef name:normvalue arg:self arg:value arguments arg arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "scale_factors",
    "source_code": "@property\ndef scale_factors(self):\n    compressed_triangles = self._triangulation.get_masked_triangles()\n    node_used = np.bincount(np.ravel(compressed_triangles), minlength=self._triangulation.x.size) != 0\n    return (1 / np.ptp(self._triangulation.x[node_used]), 1 / np.ptp(self._triangulation.y[node_used]))",
    "docstring": "Factors to rescale the triangulation into a unit square. Returns ------- (float, float) Scaling factors (kx, ky) so that the triangulation `` fits exactly inside a unit square.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tritools.py",
    "ast_data": "FunctionDef name:scale_factors arg:self arguments arg Assign Call Assign Compare Call Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "EnumSessions",
    "source_code": "def EnumSessions(self, request, context):\n    context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n    context.set_details('Method not implemented!')\n    raise NotImplementedError('Method not implemented!')",
    "docstring": "Enumerate existing sessions and return available profile tools.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\profiler_analysis_pb2_grpc.py",
    "ast_data": "FunctionDef name:EnumSessions arg:self arg:request arg:context arguments arg arg arg Call Call Raise Call"
  },
  {
    "library": "scikit-learn",
    "name": "EstimatorCheckFailedWarning",
    "source_code": "class EstimatorCheckFailedWarning(UserWarning):\n\n    def __init__(self, *, estimator, check_name: str, exception: Exception, status: str, expected_to_fail: bool, expected_to_fail_reason: str):\n        self.estimator = estimator\n        self.check_name = check_name\n        self.exception = exception\n        self.status = status\n        self.expected_to_fail = expected_to_fail\n        self.expected_to_fail_reason = expected_to_fail_reason\n\n    def __repr__(self):\n        expected_to_fail_str = f'Expected to fail: {self.expected_to_fail_reason}' if self.expected_to_fail else 'Not expected to fail'\n        return f'Test {self.check_name} failed for estimator {self.estimator!r}.\\nExpected to fail reason: {expected_to_fail_str}\\nException: {self.exception}'\n\n    def __str__(self):\n        return self.__repr__()",
    "docstring": "Warning raised when an estimator check from the common tests fails. Parameters ---------- estimator : estimator object Estimator instance for which the test failed. check_name : str Name of the check that failed. exception : Exception Exception raised by the failed check. status : str Status of the check. expected_to_fail : bool Whether the check was expected to fail. expected_to_fail_reason : str Reason for the expected failure.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\exceptions.py",
    "ast_data": "ClassDef name:EstimatorCheckFailedWarning FunctionDef name:__init__ arg:self arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Assign Return return:yes FunctionDef name:__str__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_needs_i8_conversion",
    "source_code": "def _needs_i8_conversion(self, key) -> bool:\n    key_dtype = getattr(key, 'dtype', None)\n    if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval):\n        return self._needs_i8_conversion(key.left)\n    i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n    return isinstance(key, i8_types)",
    "docstring": "Check if a given key needs i8 conversion. Conversion is necessary for Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An Interval-like requires conversion if its endpoints are one of the aforementioned types. Assumes that any list-like data has already been cast to an Index. Parameters ---------- key : scalar or Index-like The key that should be checked for i8 conversion Returns ------- bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\interval.py",
    "ast_data": "FunctionDef name:_needs_i8_conversion arg:self arg:key arguments arg arg Assign Call If BoolOp Call Call Return return:yes Call Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "load_graph",
    "source_code": "def load_graph(self, graph, tags, import_scope=None, **saver_kwargs):\n    meta_graph_def = self.get_meta_graph_def_from_tags(tags)\n    if sys.byteorder == 'big':\n        saved_model_utils.swap_function_tensor_content(meta_graph_def, 'little', 'big')\n    with graph.as_default():\n        return tf_saver._import_meta_graph_with_return_elements(meta_graph_def, import_scope=import_scope, **saver_kwargs)",
    "docstring": "Load ops and nodes from SavedModel MetaGraph into graph. Args: graph: tf.Graph object. tags: a set of string tags identifying a MetaGraphDef. import_scope: Optional -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static protocol buffer that is returned. **saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph. Returns: A tuple of * Saver defined by the MetaGraph, which can be used to restore the variable values. * List of / objects returned from (may be ).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py",
    "ast_data": "FunctionDef name:load_graph arg:self arg:graph arg:tags arg:import_scope arguments arg arg arg arg arg Assign Call If Compare Call With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "convert_limits",
    "source_code": "def convert_limits(lim, converter):\n    if isinstance(converter, DateConverter):\n        return map(num2date, lim)\n    return map(float, lim)",
    "docstring": "Convert axis limits for correct input editors.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\backends\\qt_editor\\figureoptions.py",
    "ast_data": "FunctionDef name:convert_limits arg:lim arg:converter arguments arg arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_compute_size_by_dict",
    "source_code": "def _compute_size_by_dict(indices, idx_dict):\n    ret = 1\n    for i in indices:\n        ret *= idx_dict[i]\n    return ret",
    "docstring": "Computes the product of the elements in indices based on the dictionary idx_dict. Parameters ---------- indices : iterable Indices to base the product on. idx_dict : dictionary Dictionary of index sizes Returns ------- ret : int The resulting product. Examples -------- >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) 90",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\einsumfunc.py",
    "ast_data": "FunctionDef name:_compute_size_by_dict arg:indices arg:idx_dict arguments arg arg Assign For Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "ones",
    "source_code": "def ones(shape: DynamicRaggedShape, dtype=dtypes.float32, name: Optional[str]=None) -> ragged_tensor.RaggedOrDense:\n    flat_values = array_ops.ones(shape.inner_shape, dtype=dtype, name=name)\n    return ragged_tensor.RaggedTensor._from_nested_row_partitions(flat_values, shape.row_partitions)",
    "docstring": "Returns ones shaped like x.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:ones arg:shape arg:dtype arg:name arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "using",
    "source_code": "def using(self, alias):\n    return RawQuerySet(self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias)",
    "docstring": "Select the database this RawQuerySet should execute against.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:using arg:self arg:alias arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "reset",
    "source_code": "def reset() -> None:\n    import torch._dynamo\n    torch._dynamo.reset()",
    "docstring": "This function clears all compilation caches and restores the system to its initial state. It is recommended to call this function, especially after using operations like to ensure a clean state before another unrelated compilation",
    "type": "function",
    "file_path": "pytorch\\torch\\compiler\\__init__.py",
    "ast_data": "FunctionDef name:reset arguments Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    config = dict(zip(self._fields, self))\n    config['dtype'] = self.dtype.name\n    return config",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Assign Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https://github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once=True)\ndef __init__(self, dist_cls_a, dist_cls_b):\n    self._key = (dist_cls_a, dist_cls_b)",
    "docstring": "Initialize the KL registrar. Args: dist_cls_a: the class of the first argument of the KL divergence. dist_cls_b: the class of the second argument of the KL divergence.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\kullback_leibler.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:dist_cls_a arg:dist_cls_b arguments arg arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "KeysValidator",
    "source_code": "@deconstructible\nclass KeysValidator:\n    messages = {'missing_keys': _('Some keys were missing: %(keys)s'), 'extra_keys': _('Some unknown keys were provided: %(keys)s')}\n    strict = False\n\n    def __init__(self, keys, strict=False, messages=None):\n        self.keys = set(keys)\n        self.strict = strict\n        if messages is not None:\n            self.messages = {**self.messages, **messages}\n\n    def __call__(self, value):\n        keys = set(value)\n        missing_keys = self.keys - keys\n        if missing_keys:\n            raise ValidationError(self.messages['missing_keys'], code='missing_keys', params={'keys': ', '.join(missing_keys)})\n        if self.strict:\n            extra_keys = keys - self.keys\n            if extra_keys:\n                raise ValidationError(self.messages['extra_keys'], code='extra_keys', params={'keys': ', '.join(extra_keys)})\n\n    def __eq__(self, other):\n        return isinstance(other, self.__class__) and self.keys == other.keys and (self.messages == other.messages) and (self.strict == other.strict)",
    "docstring": "A validator designed for HStore to require/restrict keys.",
    "type": "class",
    "file_path": "django\\django\\contrib\\postgres\\validators.py",
    "ast_data": "ClassDef name:KeysValidator Assign Call Call Assign FunctionDef name:__init__ arg:self arg:keys arg:strict arg:messages arguments arg arg arg arg Assign Call Assign If Compare Assign FunctionDef name:__call__ arg:self arg:value arguments arg arg Assign Call Assign If Raise Call Call If Assign If Raise Call Call FunctionDef name:__eq__ arg:self arg:other arguments arg arg Return return:yes BoolOp Call Compare Compare Compare"
  },
  {
    "library": "matplotlib",
    "name": "get_linestyle",
    "source_code": "def get_linestyle(self):\n    return self._linestyle",
    "docstring": "Return the linestyle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_linestyle arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "slot_variable_key",
    "source_code": "def slot_variable_key(variable_path, optimizer_path, slot_name):\n    return f'{variable_path}/{_OPTIMIZER_SLOTS_NAME}/{optimizer_path}/{escape_local_name(slot_name)}'",
    "docstring": "Returns checkpoint key for a slot variable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\trackable_utils.py",
    "ast_data": "FunctionDef name:slot_variable_key arg:variable_path arg:optimizer_path arg:slot_name arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_meta_graph_def",
    "source_code": "def get_meta_graph_def(saved_model_dir, tag_set):\n    return saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)",
    "docstring": "DEPRECATED: Use saved_model_utils.get_meta_graph_def instead. Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given tag-set and SavedModel directory. Args: saved_model_dir: Directory containing the SavedModel to inspect or execute. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. Raises: RuntimeError: An error when the given tag-set does not exist in the SavedModel. Returns: A MetaGraphDef corresponding to the tag-set.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_cli.py",
    "ast_data": "FunctionDef name:get_meta_graph_def arg:saved_model_dir arg:tag_set arguments arg arg Return return:yes Call"
  },
  {
    "library": "seaborn",
    "name": "ci_to_errsize",
    "source_code": "def ci_to_errsize(cis, heights):\n    cis = np.atleast_2d(cis).reshape(2, -1)\n    heights = np.atleast_1d(heights)\n    errsize = []\n    for i, (low, high) in enumerate(np.transpose(cis)):\n        h = heights[i]\n        elow = h - low\n        ehigh = high - h\n        errsize.append([elow, ehigh])\n    errsize = np.asarray(errsize).T\n    return errsize",
    "docstring": "Convert intervals to error arguments relative to plot heights. Parameters ---------- cis : 2 x n sequence sequence of confidence interval limits heights : n sequence sequence of plot heights Returns ------- errsize : 2 x n array sequence of error size relative to height values in correct format as argument for plt.bar",
    "type": "function",
    "file_path": "seaborn\\seaborn\\utils.py",
    "ast_data": "FunctionDef name:ci_to_errsize arg:cis arg:heights arguments arg arg Assign Call Call Assign Call Assign For Call Call Assign Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_iterator_spec_from_dataset",
    "source_code": "def get_iterator_spec_from_dataset(strategy, dataset):\n    output_element_spec = dataset.element_spec\n    if isinstance(dataset._type_spec, (DistributedDatasetSpec, DistributedDatasetsFromFunctionSpec)):\n        iterator_type_spec = DistributedIteratorSpec(strategy.extended._input_workers_with_options(), output_element_spec, strategy.extended._container_strategy(), options=None, cardinality=dataset.cardinality, enable_get_next_as_optional=True)\n    else:\n        if strategy.extended._num_gpus_per_worker:\n            logging.warning(f'{strategy.extended._num_gpus_per_worker} GPUs are allocated per worker. Please use DistributedDataset by calling strategy.experimental_distribute_dataset or strategy.distribute_datasets_from_function to make best use of GPU resources')\n        iterator_type_spec = iterator_ops.IteratorSpec(output_element_spec)\n    return iterator_type_spec",
    "docstring": "Returns an iterator spec from dataset function. This function constructs type spec for iterator obtained from iter(dataset). Args: strategy: a object, used to run all-reduce to handle last partial batch. dataset: A tf.data.Dataset instance. If using a function that returns a tf.data.Dataset instance, pass dataset_fn.structured_outputs. Returns: A type_spec for iterator for dataset instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py",
    "ast_data": "FunctionDef name:get_iterator_spec_from_dataset arg:strategy arg:dataset arguments arg arg Assign If Call Assign Call Call Call If Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "index",
    "source_code": "def index(self, request, extra_context=None):\n    app_list = self.get_app_list(request)\n    context = {**self.each_context(request), 'title': self.index_title, 'subtitle': None, 'app_list': app_list, **(extra_context or {})}\n    request.current_app = self.name\n    return TemplateResponse(request, self.index_template or 'admin/index.html', context)",
    "docstring": "Display the main admin index page, which lists all of the installed apps that have been registered in this site.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\sites.py",
    "ast_data": "FunctionDef name:index arg:self arg:request arg:extra_context arguments arg arg arg Assign Call Assign Call BoolOp Assign Return return:yes Call BoolOp"
  },
  {
    "library": "django",
    "name": "_model_indexes_sql",
    "source_code": "def _model_indexes_sql(self, model):\n    if not model._meta.managed or model._meta.proxy or model._meta.swapped:\n        return []\n    output = []\n    for field in model._meta.local_fields:\n        output.extend(self._field_indexes_sql(model, field))\n    for index in model._meta.indexes:\n        if not index.contains_expressions or self.connection.features.supports_expression_indexes:\n            output.append(index.create_sql(model, self))\n    return output",
    "docstring": "Return a list of all index SQL statements (field indexes, Meta.indexes) for the specified model.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\schema.py",
    "ast_data": "FunctionDef name:_model_indexes_sql arg:self arg:model arguments arg arg If BoolOp Return return:no Assign For Call Call For If BoolOp Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_fill_between_process_units",
    "source_code": "def _fill_between_process_units(self, ind_dir, dep_dir, ind, dep1, dep2, **kwargs):\n    return map(np.ma.masked_invalid, self._process_unit_info([(ind_dir, ind), (dep_dir, dep1), (dep_dir, dep2)], kwargs))",
    "docstring": "Handle united data, such as dates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py",
    "ast_data": "FunctionDef name:_fill_between_process_units arg:self arg:ind_dir arg:dep_dir arg:ind arg:dep1 arg:dep2 arguments arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "create",
    "source_code": "@abc.abstractmethod\ndef create(self, batch_outs):\n    raise NotImplementedError('Must be implemented in subclasses.')",
    "docstring": "Creates the initial results from the first batch outputs. Args: batch_outs: A list of batch-level outputs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:create arg:self arg:batch_outs arguments arg arg Raise Call"
  },
  {
    "library": "django",
    "name": "PermissionRequiredMixin",
    "source_code": "class PermissionRequiredMixin(AccessMixin):\n    permission_required = None\n\n    def get_permission_required(self):\n        if self.permission_required is None:\n            raise ImproperlyConfigured(f'{self.__class__.__name__} is missing the permission_required attribute. Define {self.__class__.__name__}.permission_required, or override {self.__class__.__name__}.get_permission_required().')\n        if isinstance(self.permission_required, str):\n            perms = (self.permission_required,)\n        else:\n            perms = self.permission_required\n        return perms\n\n    def has_permission(self):\n        perms = self.get_permission_required()\n        return self.request.user.has_perms(perms)\n\n    def dispatch(self, request, *args, **kwargs):\n        if not self.has_permission():\n            return self.handle_no_permission()\n        return super().dispatch(request, *args, **kwargs)",
    "docstring": "Verify that the current user has all specified permissions.",
    "type": "class",
    "file_path": "django\\django\\contrib\\auth\\mixins.py",
    "ast_data": "ClassDef name:PermissionRequiredMixin Assign FunctionDef name:get_permission_required arg:self arguments arg If Compare Raise Call If Call Assign Assign Return return:yes FunctionDef name:has_permission arg:self arguments arg Assign Call Return return:yes Call FunctionDef name:dispatch arg:self arg:request arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_patch_limits",
    "source_code": "def _update_patch_limits(self, patch):\n    if isinstance(patch, mpatches.Rectangle) and (not patch.get_width() and (not patch.get_height())):\n        return\n    p = patch.get_path()\n    vertices = []\n    for curve, code in p.iter_bezier(simplify=False):\n        _, dzeros = curve.axis_aligned_extrema()\n        vertices.append(curve([0, *dzeros, 1]))\n    if len(vertices):\n        vertices = np.vstack(vertices)\n    patch_trf = patch.get_transform()\n    updatex, updatey = patch_trf.contains_branch_seperately(self.transData)\n    if not (updatex or updatey):\n        return\n    if self.name != 'rectilinear':\n        if updatex and patch_trf == self.get_yaxis_transform():\n            updatex = False\n        if updatey and patch_trf == self.get_xaxis_transform():\n            updatey = False\n    trf_to_data = patch_trf - self.transData\n    xys = trf_to_data.transform(vertices)\n    self.update_datalim(xys, updatex=updatex, updatey=updatey)",
    "docstring": "Update the data limits for the given patch.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:_update_patch_limits arg:self arg:patch arguments arg arg If BoolOp Call BoolOp Call Call Return return:no Assign Call Assign For Call Assign Call Call Call If Call Assign Call Assign Call Assign Call If BoolOp Return return:no If Compare If BoolOp Compare Call Assign If BoolOp Compare Call Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_build_tensor_data",
    "source_code": "def _build_tensor_data(self):\n    map_index_to_variable = {}\n    for var in self._func.graph.variables:\n        for idx, captured_input in enumerate(self._func.captured_inputs):\n            if var.handle is captured_input:\n                map_index_to_variable[idx] = var\n                break\n    for idx, (val_tensor, name_tensor) in enumerate(self._func.graph.captures):\n        tensor_name = name_tensor.name.split(':')[0]\n        if not self._should_convert(tensor_name):\n            continue\n        if idx in map_index_to_variable:\n            data = self._eval(map_index_to_variable[idx])\n        else:\n            if val_tensor.dtype == dtypes.resource:\n                logging.vlog(1, 'Skip converting resource tensor %s' % tensor_name)\n                continue\n            data = np.array(self._eval(val_tensor))\n        self._tensor_data[tensor_name] = _TensorData(numpy=data, dtype=dtypes.as_dtype(data.dtype).as_datatype_enum, index=idx)\n    for node in self.node_defs.values():\n        if node.op == 'VariableV2':\n            if not self._should_convert(node.name):\n                continue\n            if node.name not in self.tensor_data:\n                with self._func.graph.as_default():\n                    identity_node = array_ops.identity(self._func.graph.as_graph_element(node.name + ':0'))\n                pruned_graph = self._func.prune([], [identity_node.name])()[0]\n                self._tensor_data[node.name] = _TensorData(numpy=pruned_graph.numpy(), dtype=node.attr['dtype'].type, index=None)",
    "docstring": "Caches the tensor data for all Placeholders in the given function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:_build_tensor_data arg:self arguments arg Assign For For Call If Compare Assign For Call Assign Call If Call If Compare Assign Call If Compare Call Assign Call Call Assign Call Call For Call If Compare If Call If Compare With Call Assign Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "forward_loop_exits",
    "source_code": "@property\ndef forward_loop_exits(self):\n    return self._forward_loop_exits",
    "docstring": "The list of exits of the forward loop.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:forward_loop_exits arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_hertz_to_mel",
    "source_code": "def _hertz_to_mel(frequencies_hertz, name=None):\n    with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):\n        frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)\n        return _MEL_HIGH_FREQUENCY_Q * math_ops.log(1.0 + frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)",
    "docstring": "Converts frequencies in in Hertz to the mel scale. Args: frequencies_hertz: A of frequencies in Hertz. name: An optional name for the operation. Returns: A of the same shape and type of containing frequencies in the mel scale.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\signal\\mel_ops.py",
    "ast_data": "FunctionDef name:_hertz_to_mel arg:frequencies_hertz arg:name arguments arg arg With Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "no_regularizer",
    "source_code": "@tf_export(v1=['no_regularizer'])\ndef no_regularizer(_):\n    return None",
    "docstring": "Use this function to prevent regularization of variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variable_scope.py",
    "ast_data": "FunctionDef name:no_regularizer arg:_ arguments arg Return return:no Call"
  },
  {
    "library": "pytorch",
    "name": "_is_same_info_for_same_key",
    "source_code": "def _is_same_info_for_same_key(self, info_dict_a: dict, info_dict_b: dict) -> bool:\n    dict_a_keys: set = set(info_dict_a.keys())\n    dict_b_keys: set = set(info_dict_b.keys())\n    intersecting_keys: set = dict_a_keys.intersection(dict_b_keys)\n    for key in intersecting_keys:\n        dict_a_val = info_dict_a[key]\n        dict_b_val = info_dict_b[key]\n        if type(dict_a_val) == torch.Tensor:\n            if type(dict_b_val) != torch.Tensor or sum(dict_a_val != dict_b_val) != 0:\n                return False\n        elif dict_a_val != dict_b_val:\n            return False\n    return True",
    "docstring": "Takes in two dictionaries and ensures that any common keys between the two have the same values. Args: info_dict_a (Dict): First dictionary we wish to compare info_dict_b (Dict): Second dictionary we wish to compare Returns True if all shared keys have same values, false otherwise",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:_is_same_info_for_same_key arg:self arg:info_dict_a arg:info_dict_b arguments arg arg arg Call Call Call Call Call For Assign Assign If Compare Call If BoolOp Compare Call Compare Call Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "flat_inputs",
    "source_code": "@property\ndef flat_inputs(self) -> List[trace.TraceType]:\n    if not hasattr(self, '_cached_flat_inputs'):\n        cached_flat_inputs = []\n        for p in self._sorted_parameters:\n            cached_flat_inputs.extend(p.type_constraint.flatten())\n        self._cached_flat_inputs = cached_flat_inputs\n    return self._cached_flat_inputs",
    "docstring": "Flat tensor inputs accepted by this FunctionType.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:flat_inputs arg:self arguments arg If Call Assign For Call Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "run_ui",
    "source_code": "def run_ui(self, init_command=None, title=None, title_color=None, enable_mouse_on_start=True):\n    raise NotImplementedError('run_ui() is not implemented in BaseUI')",
    "docstring": "Run the UI until user- or command- triggered exit. Args: init_command: (str) Optional command to run on CLI start up. title: (str) Optional title to display in the CLI. title_color: (str) Optional color of the title, e.g., \"yellow\". enable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on start-up. Returns: An exit token of arbitrary type. Can be None.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py",
    "ast_data": "FunctionDef name:run_ui arg:self arg:init_command arg:title arg:title_color arg:enable_mouse_on_start arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_is_intermediate_tensor_sym_call",
    "source_code": "def _is_intermediate_tensor_sym_call(node: fx.Node) -> bool:\n    return (val := _get_sym_val(node)) is not None and (not isinstance(val, sympy.Number)) and (not _has_uninterpretable_sympy_function(val)) and any((isinstance(arg, fx.Node) and isinstance(_get_example_value(arg), (torch.Tensor, torch.Size)) and (arg.op != 'placeholder') for arg in node.args))",
    "docstring": "If a size/stride/storage offset call on an intermediate tensor, we can try to compute the value from input shapes instead.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\passes\\runtime_assert.py",
    "ast_data": "FunctionDef name:_is_intermediate_tensor_sym_call arg:node arguments arg Return return:yes BoolOp Compare Call Call Call Call BoolOp Call Call Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "copy",
    "source_code": "def copy(self, node):\n    if isinstance(node, list):\n        return [self.copy(n) for n in node]\n    elif isinstance(node, tuple):\n        return tuple((self.copy(n) for n in node))\n    elif not isinstance(node, (gast.AST, ast.AST)):\n        return node\n    assert isinstance(node, (gast.AST, ast.AST))\n    new_fields = {}\n    for f in node._fields:\n        if not f.startswith('__') and hasattr(node, f):\n            new_fields[f] = self.copy(getattr(node, f))\n    new_node = type(node)(**new_fields)\n    if self.preserve_annos:\n        for k in self.preserve_annos:\n            anno.copyanno(node, new_node, k)\n    return new_node",
    "docstring": "Returns a deep copy of node (excluding some fields, see copy_clean).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py",
    "ast_data": "FunctionDef name:copy arg:self arg:node arguments arg arg If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Assign For If BoolOp Call Call Assign Call Call Assign Call Call If For Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "tx",
    "source_code": "@tx.setter\ndef tx(self, value: Union[Tensor, float]) -> 'PinholeCamera':\n    self.extrinsics[..., 0, -1] = value\n    return self",
    "docstring": "Set the x-coordinate of the translation vector with the given value.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:tx arg:self arg:value arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_FindNodesLookupTable",
    "source_code": "class _FindNodesLookupTable:\n\n    def __init__(self):\n        self.table: dict[tuple[str, Optional[Target]], dict[Node, None]] = defaultdict(dict)\n\n    def _key(self, node) -> tuple[str, Optional[Target]]:\n        return (node.op, node.target if node.op == 'call_function' else None)\n\n    def __contains__(self, node) -> bool:\n        return node in self.table[self._key(node)]\n\n    def insert(self, node: Node) -> None:\n        self.table[self._key(node)][node] = None\n\n    def remove(self, node: Node) -> None:\n        self.table[self._key(node)].pop(node)\n\n    def find_nodes(self, *, op: str, target: Optional['Target']=None):\n        if op == 'call_function':\n            assert target is not None\n            return [*self.table[op, target].keys()]\n        if target is None:\n            return [*self.table[op, None].keys()]\n        return [node for node in self.table[op, None].keys() if node.target == target]",
    "docstring": "Side table for the graph for the purpose of doing fast queries",
    "type": "class",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "ClassDef name:_FindNodesLookupTable FunctionDef name:__init__ arg:self arguments arg Call FunctionDef name:_key arg:self arg:node arguments arg arg Return return:yes Compare FunctionDef name:__contains__ arg:self arg:node arguments arg arg Return return:yes Compare Call FunctionDef name:insert arg:self arg:node arguments arg arg Assign Call FunctionDef name:remove arg:self arg:node arguments arg arg Call Call FunctionDef name:find_nodes arg:self arguments arg arg arg If Compare Compare Return return:yes Call If Compare Return return:yes Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "_has_precomputed_row_lengths",
    "source_code": "def _has_precomputed_row_lengths(self):\n    return self._row_lengths is not None",
    "docstring": "Returns true if has already been computed. If true, then will return its value without calling any TensorFlow ops.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py",
    "ast_data": "FunctionDef name:_has_precomputed_row_lengths arg:self arguments arg Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "configure_and_create_distributed_session",
    "source_code": "def configure_and_create_distributed_session(distribution_strategy):\n\n    def _create_session(distribution_strategy):\n        session_config = get_default_session_config()\n        global _SESSION\n        if getattr(_SESSION, 'session', None) and _SESSION.session._config:\n            session_config.MergeFrom(_SESSION.session._config)\n        if is_tpu_strategy(distribution_strategy):\n            distribution_strategy.configure(session_config)\n            master = distribution_strategy.extended._tpu_cluster_resolver.master()\n            session = session_module.Session(config=session_config, target=master)\n        else:\n            worker_context = dc.get_current_worker_context()\n            if worker_context:\n                dc_session_config = worker_context.session_config\n                dc_session_config.MergeFrom(session_config)\n                session = session_module.Session(config=dc_session_config, target=worker_context.master_target)\n            else:\n                distribution_strategy.configure(session_config)\n                session = session_module.Session(config=session_config)\n        set_session(session)\n    if distribution_strategy.extended._in_multi_worker_mode():\n        dc.run_distribute_coordinator(_create_session, distribution_strategy)\n    else:\n        _create_session(distribution_strategy)",
    "docstring": "Configure session config and create a session with it.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:configure_and_create_distributed_session arg:distribution_strategy arguments arg FunctionDef name:_create_session arg:distribution_strategy arguments arg Assign Call If BoolOp Call Call If Call Call Assign Call Assign Call Assign Call If Assign Call Assign Call Call Assign Call Call If Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "same_meta",
    "source_code": "def same_meta(node1: torch.fx.Node, node2: torch.fx.Node):\n    val1 = node1.meta.get('val')\n    val2 = node2.meta.get('val')\n    return val1 is not None and val2 is not None and statically_known_true(sym_eq(val1.size(), val2.size())) and (val1.layout == val2.layout) and (val1.dtype == val2.dtype) and (val1.device == val2.device) and (val1.layout != torch.strided or statically_known_true(sym_eq(val1.stride(), val2.stride())))",
    "docstring": "True if two nodes have the same metadata",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:same_meta arg:node1 arg:node2 arguments arg arg Assign Call Assign Call Return return:yes BoolOp Compare Compare Call Call Call Call Compare Compare Compare BoolOp Compare Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "temporarily_disable_xla_sharding_for_resource_variables",
    "source_code": "@contextlib.contextmanager\ndef temporarily_disable_xla_sharding_for_resource_variables():\n    previously_enabled = xla_sharding_for_resource_variables_enabled()\n    try:\n        disable_xla_sharding_for_resource_variables()\n        yield\n    finally:\n        if previously_enabled:\n            enable_xla_sharding_for_resource_variables()",
    "docstring": "Temporarily disables XLA sharding for resource variables. Should be a no-op if it is already disabled. Yields: None.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:temporarily_disable_xla_sharding_for_resource_variables arguments Assign Call Try Call If Call"
  },
  {
    "library": "tensorflow",
    "name": "_bias_add_grad_flops",
    "source_code": "@ops.RegisterStatistics('BiasAddGrad', 'flops')\ndef _bias_add_grad_flops(graph, node):\n    return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)",
    "docstring": "Compute flops for BiasAddGrad operation.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\internal\\flops_registry.py",
    "ast_data": "FunctionDef name:_bias_add_grad_flops arg:graph arg:node arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "ErrorbarContainer",
    "source_code": "class ErrorbarContainer(Container):\n\n    def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):\n        self.lines = lines\n        self.has_xerr = has_xerr\n        self.has_yerr = has_yerr\n        super().__init__(lines, **kwargs)",
    "docstring": "Container for the artists of error bars (e.g. created by ). The container can be treated as the *lines* tuple itself. Additionally, you can access these and further parameters by the attributes. Attributes ---------- lines : tuple Tuple of `~matplotlib.lines.Line2D~matplotlib.lines.Line2D~matplotlib.collections.LineCollection` if the errorbar has x/y errors.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\container.py",
    "ast_data": "ClassDef name:ErrorbarContainer FunctionDef name:__init__ arg:self arg:lines arg:has_xerr arg:has_yerr arguments arg arg arg arg arg Assign Assign Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "compile",
    "source_code": "@abstractmethod\ndef compile(self) -> None:\n    pass",
    "docstring": "Compile the wrapper.",
    "type": "method",
    "file_path": "numpy\\numpy\\f2py\\_backends\\_backend.py",
    "ast_data": "FunctionDef name:compile arg:self arguments arg"
  },
  {
    "library": "pytorch",
    "name": "_convert_standalone_module_fx",
    "source_code": "def _convert_standalone_module_fx(graph_module: GraphModule, is_reference: bool=False, convert_custom_config: Union[ConvertCustomConfig, dict[str, Any], None]=None) -> GraphModule:\n    return _convert_fx(graph_module, is_reference, convert_custom_config, is_standalone_module=True)",
    "docstring": "[Internal use only] Convert a model produced by :func: and convert it to a quantized model Returns a quantized standalone module, whether input/output is quantized is specified by prepare_custom_config, with input_quantized_idxs, output_quantized_idxs, please see docs for prepare_fx for details",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\quantize_fx.py",
    "ast_data": "FunctionDef name:_convert_standalone_module_fx arg:graph_module arg:is_reference arg:convert_custom_config arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "scrapy",
    "name": "curl_to_request_kwargs",
    "source_code": "def curl_to_request_kwargs(curl_command: str, ignore_unknown_options: bool=True) -> dict[str, Any]:\n    curl_args = split(curl_command)\n    if curl_args[0] != 'curl':\n        raise ValueError('A curl command must start with \"curl\"')\n    parsed_args, argv = curl_parser.parse_known_args(curl_args[1:])\n    if argv:\n        msg = f'Unrecognized options: {', '.join(argv)}'\n        if ignore_unknown_options:\n            warnings.warn(msg)\n        else:\n            raise ValueError(msg)\n    url = parsed_args.url\n    parsed_url = urlparse(url)\n    if not parsed_url.scheme:\n        url = 'http://' + url\n    method = parsed_args.method or 'GET'\n    result: dict[str, Any] = {'method': method.upper(), 'url': url}\n    headers, cookies = _parse_headers_and_cookies(parsed_args)\n    if headers:\n        result['headers'] = headers\n    if cookies:\n        result['cookies'] = cookies\n    if parsed_args.data:\n        result['body'] = parsed_args.data\n        if not parsed_args.method:\n            result['method'] = 'POST'\n    return result",
    "docstring": "Convert a cURL command syntax to Request kwargs. :param str curl_command: string containing the curl command :param bool ignore_unknown_options: If true, only a warning is emitted when cURL options are unknown. Otherwise raises an error. (default: True) :return: dictionary of Request kwargs",
    "type": "function",
    "file_path": "scrapy\\scrapy\\utils\\curl.py",
    "ast_data": "FunctionDef name:curl_to_request_kwargs arg:curl_command arg:ignore_unknown_options arguments arg arg Assign Call If Compare Raise Call Assign Call If Assign Call If Call Raise Call Assign Assign Call If Assign Assign BoolOp Call Assign Call If Assign If Assign If Assign If Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "crop_by_transform_mat3d",
    "source_code": "def crop_by_transform_mat3d(tensor: torch.Tensor, transform: torch.Tensor, out_size: Tuple[int, int, int], mode: str='bilinear', padding_mode: str='zeros', align_corners: bool=True) -> torch.Tensor:\n    dst_trans_src = transform.expand(tensor.shape[0], -1, -1)\n    patches: torch.Tensor = warp_affine3d(tensor, dst_trans_src[:, :3, :], out_size, flags=mode, padding_mode=padding_mode, align_corners=align_corners)\n    return patches",
    "docstring": "Perform crop transform on 3D volumes (5D tensor) given a perspective transformation matrix. Args: tensor: the 2D image tensor with shape (B, C, H, W). transform: a perspective transformation matrix with shape (B, 4, 4). out_size: size of the output image (depth, height, width). mode: interpolation mode to calculate output values ``. align_corners: mode for grid_generation. Returns: the output tensor with patches.",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\transform\\crop3d.py",
    "ast_data": "FunctionDef name:crop_by_transform_mat3d arg:tensor arg:transform arg:out_size arg:mode arg:padding_mode arg:align_corners arguments arg arg arg arg arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_name",
    "source_code": "def get_name(cls):\n    if not (isinstance(cls, type) and issubclass(cls, internal.TypeSpec)):\n        raise TypeError('Expected `cls` to be a TypeSpec; got %r' % (cls,))\n    if cls not in _TYPE_SPEC_TO_NAME:\n        raise ValueError('TypeSpec %s.%s has not been registered.' % (cls.__module__, cls.__name__))\n    return _TYPE_SPEC_TO_NAME[cls]",
    "docstring": "Returns the registered name for TypeSpec .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec_registry.py",
    "ast_data": "FunctionDef name:get_name arg:cls arguments arg If BoolOp Call Call Raise Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "UrlContract",
    "source_code": "class UrlContract(Contract):\n    name = 'url'\n\n    def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:\n        args['url'] = self.args[0]\n        return args",
    "docstring": "Contract to set the url of the request (mandatory) @url",
    "type": "class",
    "file_path": "scrapy\\scrapy\\contracts\\default.py",
    "ast_data": "ClassDef name:UrlContract Assign FunctionDef name:adjust_request_args arg:self arg:args arguments arg arg Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "gemm_mode",
    "source_code": "def gemm_mode(self) -> str:\n    sizes = self.output_node.get_size()\n    if len(sizes) > 2:\n        return 'cutlass::gemm::GemmUniversalMode::kBatched'\n    else:\n        return 'cutlass::gemm::GemmUniversalMode::kGemm'",
    "docstring": "Returns a Cutlass GEMM mode string for the current operation, dependent on whether this op implements a batched GEMM or a simple GEMM without batch dimension. Returns: str: A string indicating the Cutlass GEMM mode. If the output node has more than two dimensions, \"cutlass::gemm::GemmUniversalMode::kBatched\" is returned, otherwise \"cutlass::gemm::GemmUniversalMode::kGemm\" is returned.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:gemm_mode arg:self arguments arg Assign Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "to_dense",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef to_dense(tensor):\n    if is_sparse(tensor):\n        return sparse_ops.sparse_tensor_to_dense(tensor)\n    else:\n        return tensor",
    "docstring": "Converts a sparse tensor into a dense tensor and returns it. Args: tensor: A tensor instance (potentially sparse). Returns: A dense tensor. Examples: >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True >>> c = tf.keras.backend.to_dense(b) >>> print(tf.keras.backend.is_sparse(c)) False",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:to_dense arg:tensor arguments arg If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "start_filter",
    "source_code": "def start_filter(self):\n    pass",
    "docstring": "Switch to a temporary renderer for image filtering effects. Currently only supported by the agg renderer.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:start_filter arg:self arguments arg"
  },
  {
    "library": "django",
    "name": "save_related",
    "source_code": "def save_related(self, request, form, formsets, change):\n    form.save_m2m()\n    for formset in formsets:\n        self.save_formset(request, form, formset, change=change)",
    "docstring": "Given the `` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:save_related arg:self arg:request arg:form arg:formsets arg:change arguments arg arg arg arg arg Call For Call"
  },
  {
    "library": "sphinx",
    "name": "save_traceback",
    "source_code": "def save_traceback(exception: BaseException, *, message_log: Collection[str]=(), extensions: Collection[Extension]=()) -> str:\n    output = full_exception_context(exception=exception, message_log=message_log, extensions=extensions)\n    filename = write_temporary_file(output)\n    return filename",
    "docstring": "Save the given exception's traceback in a temporary file.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py",
    "ast_data": "FunctionDef name:save_traceback arg:exception arguments arg arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, indx, value):\n    ma.MaskedArray.__setitem__(self, indx, value)\n    if isinstance(indx, str):\n        self._mask[indx] = ma.getmaskarray(value)",
    "docstring": "Sets the given record to value.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\mrecords.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:indx arg:value arguments arg arg arg Call If Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "values_from_const",
    "source_code": "def values_from_const(node_def: node_def_pb2.NodeDef) -> np.ndarray:\n    if node_def.op != 'Const':\n        raise ValueError(f'Can not extract constant value from a node that is not Const. Got:\\n{node_def}')\n    input_tensor = node_def.attr['value'].tensor\n    tensor_value = tensor_util.MakeNdarray(input_tensor)\n    return tensor_value",
    "docstring": "Extracts the values from a const NodeDef as a numpy ndarray. Args: node_def: Const NodeDef that has the values we want to access. Returns: Numpy ndarray containing the values. Raises: ValueError: If the node isn't a Const.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py",
    "ast_data": "FunctionDef name:values_from_const arg:node_def arguments arg If Compare Raise Call Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "extract_variable_info",
    "source_code": "def extract_variable_info(kwargs) -> Tuple[Text, Tuple[int, ...], dtypes.DType, Callable[[], Any]]:\n    if isinstance(kwargs['initial_value'], functools.partial) and ('shape' in kwargs['initial_value'].keywords or kwargs['initial_value'].args):\n        if 'shape' in kwargs['initial_value'].keywords:\n            shape = kwargs['initial_value'].keywords['shape']\n        else:\n            shape = kwargs['initial_value'].args[0]\n        return (kwargs['name'], shape, kwargs['initial_value'].keywords.get('dtype', kwargs['dtype']), kwargs['initial_value'].func)\n    elif 'shape' not in kwargs or kwargs['shape'] is None or (not callable(kwargs['initial_value'])):\n        raise ValueError('Unable to extract initializer function and shape from {}. Please either pass a function that expects a shape and dtype as the initial value for your variable or functools.partial object with the shape and dtype kwargs set. This is needed so that we can initialize the shards of the ShardedVariable locally.'.format(kwargs['initial_value']))\n    else:\n        return (kwargs['name'], kwargs['shape'], kwargs['dtype'], kwargs['initial_value'])",
    "docstring": "Extracts the variable creation attributes from the kwargs. Args: kwargs: a dict of keyword arguments that were passed to a variable creator scope. Returns: A tuple of variable name, shape, dtype, initialization function.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py",
    "ast_data": "FunctionDef name:extract_variable_info arg:kwargs arguments arg If BoolOp Call BoolOp Compare If Compare Assign Assign Return return:yes Call If BoolOp Compare Compare Call Raise Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "scan",
    "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.scan(...) instead')\n@tf_export('data.experimental.scan')\ndef scan(initial_state, scan_func):\n\n    def _apply_fn(dataset):\n        return dataset.scan(initial_state=initial_state, scan_func=scan_func)\n    return _apply_fn",
    "docstring": "A transformation that scans a function across an input dataset. This transformation is a stateful relative of . In addition to mapping across the elements of the input dataset, accumulates one or more state tensors, whose initial values are . Args: initial_state: A nested structure of tensors, representing the initial state of the accumulator. scan_func: A function that maps to . It must take two arguments and return a pair of nested structures of tensors. The must match the structure of . Returns: A transformation function, which can be passed to .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\scan_ops.py",
    "ast_data": "FunctionDef name:scan arg:initial_state arg:scan_func arguments arg arg FunctionDef name:_apply_fn arg:dataset arguments arg Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "export",
    "source_code": "def export(self, name=None):\n    with ops.name_scope(name, '%s_lookup_table_export_values' % self.name, [self.resource_handle]):\n        with ops.colocate_with(self.resource_handle):\n            exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n    return (exported_keys, exported_values)",
    "docstring": "Returns tensors of all keys and values in the table. Args: name: A name for the operation (optional). Returns: A pair of tensors with the first tensor containing all keys and the second tensors containing all values in the table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py",
    "ast_data": "FunctionDef name:export arg:self arg:name arguments arg arg With Call With Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_tensor_method_or_property",
    "source_code": "@_disable_user_warnings\ndef is_tensor_method_or_property(func: Callable) -> bool:\n    return func in _get_tensor_methods() or func.__name__ == '__get__'",
    "docstring": "Returns True if the function passed in is a handler for a method or property belonging to `__module__`. Examples -------- >>> is_tensor_method_or_property(torch.Tensor.add) True >>> is_tensor_method_or_property(torch.add) False",
    "type": "function",
    "file_path": "pytorch\\torch\\overrides.py",
    "ast_data": "FunctionDef name:is_tensor_method_or_property arg:func arguments arg Return return:yes BoolOp Compare Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "targets",
    "source_code": "def targets(self):\n    return self._targets",
    "docstring": "Return the unique names of ops to run. Returns: A list of strings.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:targets arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "IgnoreRequest",
    "source_code": "class IgnoreRequest(Exception):\n    pass",
    "docstring": "Indicates a decision was made not to process a request",
    "type": "class",
    "file_path": "scrapy\\scrapy\\exceptions.py",
    "ast_data": "ClassDef name:IgnoreRequest"
  },
  {
    "library": "django",
    "name": "add_annotation",
    "source_code": "def add_annotation(self, annotation, alias, select=True):\n    self.check_alias(alias)\n    annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None)\n    if select:\n        self.append_annotation_mask([alias])\n    else:\n        self.set_annotation_mask(set(self.annotation_select).difference({alias}))\n    self.annotations[alias] = annotation\n    if select and self.selected:\n        self.selected[alias] = alias",
    "docstring": "Add a single annotation expression to the Query.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:add_annotation arg:self arg:annotation arg:alias arg:select arguments arg arg arg arg Call Assign Call If Call Call Call Call Assign If BoolOp Assign"
  },
  {
    "library": "tensorflow",
    "name": "_serialize_function_to_config",
    "source_code": "def _serialize_function_to_config(function):\n    if isinstance(function, python_types.LambdaType):\n        output = generic_utils.func_dump(function)\n        output_type = 'lambda'\n        module = function.__module__\n    elif callable(function):\n        output = function.__name__\n        output_type = 'function'\n        module = function.__module__\n    else:\n        raise ValueError('Unrecognized function type for input: {}'.format(type(function)))\n    return (output, output_type, module)",
    "docstring": "Serialize the function for get_config().",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_wrapper_impl.py",
    "ast_data": "FunctionDef name:_serialize_function_to_config arg:function arguments arg If Call Assign Call Assign Assign If Call Assign Assign Assign Raise Call Call Call Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "wsgi",
    "source_code": "@property\ndef wsgi(self):\n    return bool(self._get_builtin_handler(self.error_log, 'wsgi'))",
    "docstring": "Write errors to wsgi.errors. If you set this to True, it'll add the appropriate :class: for you (which writes errors to ``). If you set it to False, it will remove the handler.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cplogging.py",
    "ast_data": "FunctionDef name:wsgi arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "parse_data",
    "source_code": "def parse_data(self) -> list[dict[str, str | None]]:\n    raise AbstractMethodError(self)",
    "docstring": "Parse xml data. This method will call the other internal methods to validate ``, names, parse and return specific nodes.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\xml.py",
    "ast_data": "FunctionDef name:parse_data arg:self arguments arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_collect_resource_inputs",
    "source_code": "def _collect_resource_inputs(op):\n\n    def _process(op_queue, seen_ops):\n        reads = []\n        writes = []\n        op = op_queue.pop()\n        if op in seen_ops:\n            return (reads, writes)\n        seen_ops.add(op)\n        reads, writes = acd_utils.get_read_write_resource_inputs(op)\n        op_queue.extend((t.op for t in op.inputs if t.dtype == dtypes.variant))\n        return (reads, writes)\n    op_queue = [op]\n    seen_ops = set()\n    all_reads = []\n    all_writes = []\n    while op_queue:\n        reads, writes = _process(op_queue, seen_ops)\n        all_reads.extend(reads)\n        all_writes.extend(writes)\n    return (all_reads, all_writes)",
    "docstring": "Collects resource inputs for the given ops (and its variant inputs).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:_collect_resource_inputs arg:op arguments arg FunctionDef name:_process arg:op_queue arg:seen_ops arguments arg arg Assign Assign Assign Call If Compare Return return:yes Call Assign Call Call Compare Return return:yes Assign Assign Call Assign Assign While Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "match",
    "source_code": "def match(self, event: _ProfilerEvent):\n    raise NotImplementedError",
    "docstring": "Return True if the event matches the pattern. This method should be overriden in subclass.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py",
    "ast_data": "FunctionDef name:match arg:self arg:event arguments arg arg Raise"
  },
  {
    "library": "kornia",
    "name": "SIFTFeature",
    "source_code": "class SIFTFeature(LocalFeature):\n\n    def __init__(self, num_features: int=8000, upright: bool=False, rootsift: bool=True, device: Optional[Device]=None, config: Optional[Detector_config]=None) -> None:\n        patch_size: int = 41\n        if device is None:\n            device = torch.device('cpu')\n        if config is None:\n            config = get_default_detector_config()\n        detector = MultiResolutionDetector(BlobDoGSingle(1.0, 1.6), num_features, config, ori_module=PassLAF() if upright else LAFOrienter(19), aff_module=PassLAF()).to(device)\n        descriptor = LAFDescriptor(SIFTDescriptor(patch_size=patch_size, rootsift=rootsift), patch_size=patch_size, grayscale_descriptor=True).to(device)\n        super().__init__(detector, descriptor)",
    "docstring": "Convenience module, which implements DoG detector + (Root)SIFT descriptor. Using without blur pyramid Still not as good as OpenCV/VLFeat because of but we are working on it",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\integrated.py",
    "ast_data": "ClassDef name:SIFTFeature FunctionDef name:__init__ arg:self arg:num_features arg:upright arg:rootsift arg:device arg:config arguments arg arg arg arg arg arg If Compare Assign Call If Compare Assign Call Assign Call Call Call Call Call Call Assign Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_end_hook",
    "source_code": "def _call_end_hook(self, mode):\n    if mode == ModeKeys.TRAIN:\n        self.on_train_end()\n    elif mode == ModeKeys.TEST:\n        self.on_test_end()\n    else:\n        self.on_predict_end()",
    "docstring": "Helper function for on_{train|test|predict}_end methods.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:_call_end_hook arg:self arg:mode arguments arg arg If Compare Call If Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_non_observable_arg_indexes_and_types",
    "source_code": "def get_non_observable_arg_indexes_and_types(node: Node) -> dict[Union[type, torch.dtype], Callable[[Node], list[int]]]:\n    info = NodeInfo(node.op, node.target)\n    return NON_OBSERVABLE_ARG_DICT.get(info, EMPTY_ARG_DICT)",
    "docstring": "Returns a dict with of non float tensor types as keys and values which correspond to a function to retrieve the list (which takes the node as an argument)",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py",
    "ast_data": "FunctionDef name:get_non_observable_arg_indexes_and_types arg:node arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "mesh_shape",
    "source_code": "@property\ndef mesh_shape(self):\n    return self._mesh_shape",
    "docstring": "A rank 1 int32 array describing the shape of the TPU topology.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\topology.py",
    "ast_data": "FunctionDef name:mesh_shape arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "mpl_connect",
    "source_code": "def mpl_connect(self, s, func):\n    return self.callbacks.connect(s, func)",
    "docstring": "Bind function *func* to event *s*. Parameters ---------- s : str One of the following events ids: - 'button_press_event' - 'button_release_event' - 'draw_event' - 'key_press_event' - 'key_release_event' - 'motion_notify_event' - 'pick_event' - 'resize_event' - 'scroll_event' - 'figure_enter_event', - 'figure_leave_event', - 'axes_enter_event', - 'axes_leave_event' - 'close_event'. func : callable The callback function to be executed, which must have the signature:: def func(event: Event) -> Any For the location events (button and key press/release), if the mouse is over the Axes, the `~matplotlib.axes.Axes.KeyEvent.MouseEvent.FigureCanvasBase.mpl_disconnect`. Examples -------- :: def on_press(event): print('you pressed', event.button, event.xdata, event.ydata) cid = canvas.mpl_connect('button_press_event', on_press)",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:mpl_connect arg:self arg:s arg:func arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_reduction",
    "source_code": "def _get_reduction(self):\n    if not self._allow_sum_over_batch_size and distribute_lib.has_strategy() and (self.reduction == losses_utils.ReductionV2.AUTO or self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE):\n        raise ValueError('Please use `tf.keras.losses.Reduction.SUM` or `tf.keras.losses.Reduction.NONE` for loss reduction when losses are used with `tf.distribute.Strategy` outside of the built-in training loops. You can implement `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch size like:\\n```\\nwith strategy.scope():\\n    loss_obj = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)\\n....\\n    loss = tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size)\\n```\\nPlease see https://www.tensorflow.org/tutorials/distribute/custom_training for more details.')\n    if self.reduction == losses_utils.ReductionV2.AUTO:\n        return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE\n    return self.reduction",
    "docstring": "Handles reduction cases and returns the reduction value.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py",
    "ast_data": "FunctionDef name:_get_reduction arg:self arguments arg If BoolOp Call BoolOp Compare Compare Raise Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "getsourcelines",
    "source_code": "def getsourcelines(object):\n    return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])",
    "docstring": "TFDecorator-aware replacement for inspect.getsourcelines.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getsourcelines arg:object arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "variable_scope_name",
    "source_code": "@property\ndef variable_scope_name(self):\n    if self._variable_scope:\n        name = self._variable_scope.name\n        if not name or name[-1] == '/':\n            return name\n        else:\n            return name + '/'",
    "docstring": "Returns the variable scope name created by this Template.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py",
    "ast_data": "FunctionDef name:variable_scope_name arg:self arguments arg If Assign If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_RecvInfo",
    "source_code": "class _RecvInfo:\n\n    def __init__(self, input_name: str, source: int, buffer: torch.Tensor):\n        self.input_name = input_name\n        self.source = source\n        self.buffer = buffer\n\n    def __repr__(self):\n        return f'_RecvInfo(input={self.input_name}, source={self.source}, shape={self.buffer.size()})'",
    "docstring": "Represents a stage input.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "ClassDef name:_RecvInfo FunctionDef name:__init__ arg:self arg:input_name arg:source arg:buffer arguments arg arg arg arg Assign Assign Assign FunctionDef name:__repr__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_HandcraftedBlock",
    "source_code": "class _HandcraftedBlock(Module):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.spatial_gradient = SpatialGradient('sobel', 1)\n\n    def forward(self, x: Tensor) -> Tensor:\n        sobel = self.spatial_gradient(x)\n        dx, dy = (sobel[:, :, 0, :, :], sobel[:, :, 1, :, :])\n        sobel_dx = self.spatial_gradient(dx)\n        dxx, dxy = (sobel_dx[:, :, 0, :, :], sobel_dx[:, :, 1, :, :])\n        sobel_dy = self.spatial_gradient(dy)\n        dyy = sobel_dy[:, :, 1, :, :]\n        hc_feats = concatenate([dx, dy, dx ** 2.0, dy ** 2.0, dx * dy, dxy, dxy ** 2.0, dxx, dyy, dxx * dyy], 1)\n        return hc_feats",
    "docstring": "Helper class for KeyNet, it defines the handcrafted filters within the Key.Net handcrafted block.",
    "type": "class",
    "file_path": "kornia\\kornia\\feature\\keynet.py",
    "ast_data": "ClassDef name:_HandcraftedBlock FunctionDef name:__init__ arg:self arguments arg Call Call Assign Call FunctionDef name:forward arg:self arg:x arguments arg arg Assign Call Assign Assign Call Assign Assign Call Assign Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "palplot",
    "source_code": "def palplot(pal, size=1):\n    n = len(pal)\n    _, ax = plt.subplots(1, 1, figsize=(n * size, size))\n    ax.imshow(np.arange(n).reshape(1, n), cmap=mpl.colors.ListedColormap(list(pal)), interpolation='nearest', aspect='auto')\n    ax.set_xticks(np.arange(n) - 0.5)\n    ax.set_yticks([-0.5, 0.5])\n    ax.set_xticklabels(['' for _ in range(n)])\n    ax.yaxis.set_major_locator(ticker.NullLocator())",
    "docstring": "Plot the values in a color palette as a horizontal array. Parameters ---------- pal : sequence of matplotlib colors colors, i.e. as returned by seaborn.color_palette() size : scaling factor for size of plot",
    "type": "function",
    "file_path": "seaborn\\seaborn\\miscplot.py",
    "ast_data": "FunctionDef name:palplot arg:pal arg:size arguments arg arg Assign Call Assign Call Call Call Call Call Call Call Call Call Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "read_value",
    "source_code": "def read_value(self):\n    raise NotImplementedError",
    "docstring": "Returns the value of this variable, read in the current context. Can be different from value() if it's on another device, with control dependencies, etc. Returns: A containing the value of the variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:read_value arg:self arguments arg Raise"
  },
  {
    "library": "django",
    "name": "generate_added_fields",
    "source_code": "def generate_added_fields(self):\n    for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):\n        self._generate_added_field(app_label, model_name, field_name)",
    "docstring": "Make AddField operations.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\autodetector.py",
    "ast_data": "FunctionDef name:generate_added_fields arg:self arguments arg For Call Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self, *args, target=None, losses: Optional[list]=None, **kwargs):\n    for stage in self._stages:\n        stage.clear_runtime_states()\n    args_split, kwargs_split = self._split_inputs(args, kwargs)\n    if target is not None:\n        targets_split = list(torch.tensor_split(target, self._n_microbatches))\n    else:\n        targets_split = None\n    self._step_microbatches(args_split, kwargs_split, targets_split, losses)\n    for stage in self._stages:\n        if stage.is_last:\n            return self._merge_outputs(stage.output_chunks)\n    return None",
    "docstring": "Run one iteration of the pipeline schedule with *whole-batch* input. Will chunk the input into microbatches automatically, and go through the microbatches according to the schedule implementation. args: positional arguments to the model (as in non-pipeline case). kwargs: keyword arguments to the model (as in non-pipeline case). target: target for the loss function. losses: a list to store the losses for each microbatch.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\schedules.py",
    "ast_data": "FunctionDef name:step arg:self arguments arg arg arg arg arg For Call Assign Call If Compare Assign Call Call Assign Call For If Return return:yes Call Return return:no"
  },
  {
    "library": "scipy",
    "name": "_validate_names",
    "source_code": "def _validate_names(typename, field_names, extra_field_names):\n    for name in [typename] + field_names + extra_field_names:\n        if not isinstance(name, str):\n            raise TypeError('typename and all field names must be strings')\n        if not name.isidentifier():\n            raise ValueError(f'typename and all field names must be valid identifiers: {name!r}')\n        if _iskeyword(name):\n            raise ValueError(f'typename and all field names cannot be a keyword: {name!r}')\n    seen = set()\n    for name in field_names + extra_field_names:\n        if name.startswith('_'):\n            raise ValueError(f'Field names cannot start with an underscore: {name!r}')\n        if name in seen:\n            raise ValueError(f'Duplicate field name: {name!r}')\n        seen.add(name)",
    "docstring": "Ensure that all the given names are valid Python identifiers that do not start with '_'. Also check that there are no duplicates among field_names + extra_field_names.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_bunch.py",
    "ast_data": "FunctionDef name:_validate_names arg:typename arg:field_names arg:extra_field_names arguments arg arg arg For If Call Raise Call If Call Raise Call If Call Raise Call Assign Call For If Call Raise Call If Compare Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "op_list",
    "source_code": "def op_list(**configs):\n    generated_configs = []\n    if 'attrs' not in configs:\n        raise ValueError('Missing attrs in configs')\n    for inputs in configs['attrs']:\n        tmp_result = {configs['attr_names'][i]: input_value for i, input_value in enumerate(inputs)}\n        generated_configs.append(tmp_result)\n    return generated_configs",
    "docstring": "Generate a list of ops organized in a specific format. It takes two parameters which are \"attr_names\" and \"attr\". attrs stores the name and function of operators. Args: configs: key-value pairs including the name and function of operators. attrs and attr_names must be present in configs. Return: a sequence of dictionaries which stores the name and function of ops in a specifal format Example: attrs = [ [\"abs\", torch.abs], [\"abs_\", torch.abs_], ] attr_names = [\"op_name\", \"op\"]. With those two examples, we will generate (({\"op_name\": \"abs\"}, {\"op\" : torch.abs}), ({\"op_name\": \"abs_\"}, {\"op\" : torch.abs_}))",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py",
    "ast_data": "FunctionDef name:op_list arguments arg Assign If Compare Raise Call For Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "version",
    "source_code": "def version():\n    ver = torch._C._nccl_version()\n    major = ver >> 32\n    minor = ver >> 16 & 65535\n    patch = ver & 65535\n    suffix = torch._C._nccl_version_suffix().decode('utf-8')\n    if suffix == '':\n        return (major, minor, patch)\n    else:\n        return (major, minor, patch, suffix)",
    "docstring": "Returns the version of the NCCL. This function returns a tuple containing the major, minor, and patch version numbers of the NCCL. The suffix is also included in the tuple if a version suffix exists. Returns: tuple: The version information of the NCCL.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\nccl.py",
    "ast_data": "FunctionDef name:version arguments Assign Call Assign Assign Assign Assign Call Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_prefetch_handle",
    "source_code": "@no_type_check\ndef _prefetch_handle(state: _FSDPState, current_handle: Optional[FlatParamHandle], prefetch_mode: _PrefetchMode) -> None:\n    if not current_handle:\n        return\n    handle = _get_handle_to_prefetch(state, current_handle)\n    if not handle:\n        return\n    prev_training_state = handle._training_state\n    if prefetch_mode == _PrefetchMode.BACKWARD:\n        handle._training_state = HandleTrainingState.BACKWARD_PRE\n    elif prefetch_mode == _PrefetchMode.FORWARD:\n        handle._training_state = HandleTrainingState.FORWARD\n    else:\n        raise ValueError(f'Invalid prefetch mode on rank {state.rank}: {prefetch_mode}')\n    _unshard(state, handle, state._unshard_stream, state._pre_unshard_stream)\n    handle._training_state = prev_training_state\n    handle._prefetched = True",
    "docstring": "Prefetches the next handles if needed (without synchronization). An empty handles key cannot prefetch.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_runtime_utils.py",
    "ast_data": "FunctionDef name:_prefetch_handle arg:state arg:current_handle arg:prefetch_mode arguments arg arg arg If Return return:no Assign Call If Return return:no Assign If Compare Assign If Compare Assign Raise Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "transform_non_affine",
    "source_code": "def transform_non_affine(self, values):\n    with np.errstate(divide='ignore', invalid='ignore'):\n        out = np.log10(values / (1 - values))\n    if self._clip:\n        out[values <= 0] = -1000\n        out[1 <= values] = 1000\n    return out",
    "docstring": "logit transform (base 10), masked or clipped",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\scale.py",
    "ast_data": "FunctionDef name:transform_non_affine arg:self arg:values arguments arg arg With Call Assign Call If Assign Compare Assign Compare Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "copy_to_dict",
    "source_code": "def copy_to_dict(self) -> dict[_SettingsKeyT, Any]:\n    settings = self.copy()\n    return settings._to_dict()",
    "docstring": "Make a copy of current settings and convert to a dict. This method returns a new dict populated with the same values and their priorities as the current settings. Modifications to the returned dict won't be reflected on the original settings. This method can be useful for example for printing settings in Scrapy shell.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\settings\\__init__.py",
    "ast_data": "FunctionDef name:copy_to_dict arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "main_hook",
    "source_code": "def main_hook(self):\n    ddp = self.ddp\n    ddp.reducer._rebuild_buckets()\n    ddp._check_and_sync_module_buffers()\n    should_sync_backwards = ddp._check_global_requires_backward_grad_sync(is_joined_rank=True)\n    ddp.require_forward_param_sync = should_sync_backwards\n    if not should_sync_backwards:\n        return\n    ddp._match_all_reduce_for_bwd_pass()\n    if ddp.find_unused_parameters:\n        ddp._match_unused_params_allreduce()\n    ddp.reducer._push_all_rebuilt_params()",
    "docstring": "Shadow the DDP collective communication operations in the forward and backward passes.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:main_hook arg:self arguments arg Assign Call Call Assign Call Assign If Return return:no Call If Call Call"
  },
  {
    "library": "numpy",
    "name": "linspace",
    "source_code": "def linspace(self, n=100, domain=None):\n    if domain is None:\n        domain = self.domain\n    x = np.linspace(domain[0], domain[1], n)\n    y = self(x)\n    return (x, y)",
    "docstring": "Return x, y values at equally spaced points in domain. Returns the x, y values at linearly spaced points across the domain. Here y is the value of the polynomial at the points x. By default the domain is the same as that of the series instance. This method is intended mostly as a plotting aid. Parameters ---------- n : int, optional Number of point pairs to return. The default value is 100. domain : {None, array_like}, optional If not None, the specified domain is used instead of that of the calling instance. It should be of the form ``. The default is None which case the class domain is used. Returns ------- x, y : ndarray x is equal to linspace(self.domain[0], self.domain[1], n) and y is the series evaluated at element of x.",
    "type": "method",
    "file_path": "numpy\\numpy\\polynomial\\_polybase.py",
    "ast_data": "FunctionDef name:linspace arg:self arg:n arg:domain arguments arg arg arg If Compare Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_trtengineop_node_op_count",
    "source_code": "def get_trtengineop_node_op_count(graphdef, node_name):\n    ops_in_engine = collections.defaultdict(int)\n    for func in graphdef.library.function:\n        if f'{node_name}_native_segment' == func.signature.name:\n            node_count = len(func.node_def)\n            for node in func.node_def:\n                ops_in_engine[node.op] += 1\n            break\n    return (node_count, ops_in_engine)",
    "docstring": "Counts the number of nodes and OP types of a given TRTEngineOp.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py",
    "ast_data": "FunctionDef name:get_trtengineop_node_op_count arg:graphdef arg:node_name arguments arg arg Assign Call For If Compare Assign Call For Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "__copy__",
    "source_code": "@abc.abstractmethod\ndef __copy__(self) -> DSAPublicKey:\n    pass",
    "docstring": "Returns a copy.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:__copy__ arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "_check_precision_matrix",
    "source_code": "def _check_precision_matrix(precision, covariance_type):\n    if not (np.allclose(precision, precision.T) and np.all(linalg.eigvalsh(precision) > 0.0)):\n        raise ValueError(\"'%s precision' should be symmetric, positive-definite\" % covariance_type)",
    "docstring": "Check a precision matrix is symmetric and positive-definite.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\mixture\\_gaussian_mixture.py",
    "ast_data": "FunctionDef name:_check_precision_matrix arg:precision arg:covariance_type arguments arg arg If BoolOp Call Call Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_ops_filter_safe",
    "source_code": "def _ops_filter_safe(name: str) -> bool:\n    return name.startswith(('torch.ops.aten', 'torch.ops.fbgemm'))",
    "docstring": "An ops filter which allows pickle-safe ops. Pickle-safe ops are built-in ones where it will be possible to unpickle on any machine which has PyTorch.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\_graph_pickler.py",
    "ast_data": "FunctionDef name:_ops_filter_safe arg:name arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "write_folder",
    "source_code": "def write_folder(self, archive_dir: str, folder_dir: str) -> None:\n    assert os.path.isdir(folder_dir), f'{folder_dir} is not a valid directory path'\n    file_paths = filter(os.path.isfile, glob.glob(f'{folder_dir}/**', recursive=True))\n    for file_path in file_paths:\n        filename = os.path.relpath(file_path, folder_dir)\n        archive_path = os.path.join(archive_dir, filename)\n        self.write_file(archive_path, file_path)",
    "docstring": "Copy a folder into the archive. archive_dir: The destination folder inside the archive. folder_dir: The source folder on disk.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:write_folder arg:self arg:archive_dir arg:folder_dir arguments arg arg arg Call Assign Call Call For Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_set_batch_size",
    "source_code": "def _set_batch_size(self, batch_size):\n    if not self._has_valid_tensors():\n        raise ValueError('The batch size cannot be set for this model. Please use input_shapes parameter.')\n    for tensor in self._input_tensors:\n        shape = tensor.shape.as_list()\n        if shape[0] is None:\n            shape[0] = batch_size\n            tensor.set_shape(shape)",
    "docstring": "Sets the first dimension of the input tensor to . Args: batch_size: Batch size for the model. Replaces the first dimension of an input size array if undefined. (default 1) Raises: ValueError: input_tensor is not defined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py",
    "ast_data": "FunctionDef name:_set_batch_size arg:self arg:batch_size arguments arg arg If Call Raise Call For Assign Call If Compare Assign Call"
  },
  {
    "library": "pandas",
    "name": "ordered",
    "source_code": "@property\ndef ordered(self) -> Ordered:\n    return self._ordered",
    "docstring": "Whether the categories have an ordered relationship. See Also -------- categories : An Index containing the unique categories allowed. Examples -------- >>> cat_type = pd.CategoricalDtype(categories=[\"a\", \"b\"], ordered=True) >>> cat_type.ordered True >>> cat_type = pd.CategoricalDtype(categories=[\"a\", \"b\"], ordered=False) >>> cat_type.ordered False",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py",
    "ast_data": "FunctionDef name:ordered arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "set_axis_on",
    "source_code": "def set_axis_on(self):\n    self.axison = True\n    self.stale = True",
    "docstring": "Do not hide all visual components of the x- and y-axis. This reverts the effect of a prior call. Whether the individual axis decorations are drawn is controlled by their respective visibility settings. This is on by default.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_axis_on arg:self arguments arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "expanddim_inputs_for_broadcast",
    "source_code": "def expanddim_inputs_for_broadcast(self):\n    if len(self._inputs) < 2:\n        return\n    ranks = [_rank(inp.t) if inp.is_stacked else _rank(inp.t) + 1 for inp in self._inputs]\n    if all((isinstance(rank, int) for rank in ranks)):\n        max_rank = max(ranks)\n    else:\n        max_rank = functools.reduce(math_ops.maximum, ranks)\n    for i, inp in enumerate(self._inputs):\n        if not inp.is_stacked:\n            continue\n        if isinstance(max_rank, int) and ranks[i] == max_rank:\n            continue\n        self._inputs[i] = wrap(_expand_dims(inp.t, 1, max_rank - ranks[i]), True)",
    "docstring": "Reshapes stacked inputs to prepare them for broadcast. Since stacked inputs have an extra leading dimension, automatic broadcasting rules could incorrectly try to expand dimensions before that leading dimension. To avoid that, we reshape these stacked inputs to the maximum rank they will need to be broadcasted to. IMPORTANT: This function is heavily optimized for statically known ranks because it's on the critical path of some huge training graphs.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py",
    "ast_data": "FunctionDef name:expanddim_inputs_for_broadcast arg:self arguments arg If Compare Call Return return:no Assign Call Call If Call Call Assign Call Assign Call For Call If If BoolOp Call Compare Assign Call Call"
  },
  {
    "library": "matplotlib",
    "name": "register",
    "source_code": "@classmethod\n@_api.deprecated('3.10.0', message='This method is never used internally.', alternative='No replacement.  Please open an issue if you use this.')\ndef register(cls, name, style):\n    if not issubclass(style, cls._Base):\n        raise ValueError(f'{style} must be a subclass of {cls._Base}')\n    cls._style_list[name] = style",
    "docstring": "Register a new style.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:register arg:cls arg:name arg:style arguments arg arg arg If Call Raise Call Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "sigmoid",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef sigmoid(x):\n    return nn.sigmoid(x)",
    "docstring": "Element-wise sigmoid. Args: x: A tensor or variable. Returns: A tensor.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:sigmoid arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_nandict",
    "source_code": "class _nandict(dict):\n\n    def __init__(self, mapping):\n        super().__init__(mapping)\n        for key, value in mapping.items():\n            if is_scalar_nan(key):\n                self.nan_value = value\n                break\n\n    def __missing__(self, key):\n        if hasattr(self, 'nan_value') and is_scalar_nan(key):\n            return self.nan_value\n        raise KeyError(key)",
    "docstring": "Dictionary with support for nans.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\utils\\_encode.py",
    "ast_data": "ClassDef name:_nandict FunctionDef name:__init__ arg:self arg:mapping arguments arg arg Call Call For Call If Call Assign FunctionDef name:__missing__ arg:self arg:key arguments arg arg If BoolOp Call Call Return return:yes Raise Call"
  },
  {
    "library": "django",
    "name": "identify_epsg",
    "source_code": "def identify_epsg(self):\n    capi.identify_epsg(self.ptr)",
    "docstring": "This method inspects the WKT of this SpatialReference, and will add EPSG authority nodes where an EPSG identifier is applicable.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py",
    "ast_data": "FunctionDef name:identify_epsg arg:self arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "__call__",
    "source_code": "def __call__(self) -> Union[str, None]:\n    raise NotImplementedError",
    "docstring": "Returns either self.line or None to indicate the line has been 'unwritten'",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\utils.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Raise"
  },
  {
    "library": "pytorch",
    "name": "diff_report",
    "source_code": "def diff_report(self) -> str:\n    graph_a = self.graph_a\n    graph_b = self.graph_b\n    graph_a_str = str(graph_a)\n    graph_b_str = str(graph_b)\n    if graph_a_str == graph_b_str:\n        return ''\n    graph_diff = difflib.ndiff(graph_a_str.splitlines(True), graph_b_str.splitlines(True))\n    graph_diff_report = ['Graph diff:', self._indent(''.join(graph_diff))]\n    for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):\n        if str(node_a) != str(node_b):\n            graph_diff_report.append('First diverging operator:')\n            node_diff = difflib.ndiff(str(node_a).splitlines(True), str(node_b).splitlines(True))\n            source_printout = ['node diff:', self._indent(''.join(node_diff))]\n            stack_a = node_a.sourceRange() if node_a else None\n            if stack_a:\n                source_printout.extend(['Former source location:', self._indent(str(stack_a))])\n            stack_b = node_b.sourceRange() if node_b else None\n            if stack_b:\n                source_printout.extend(['Latter source location:', self._indent(str(stack_b))])\n            graph_diff_report.extend(source_printout)\n            break\n    return '\\n'.join(graph_diff_report)",
    "docstring": "Return a string representation of the graph difference. The report shows the first pair of nodes that diverges. It also shows the source location of the pair of nodes. Returns: graph_diff_report (str): A string representation of the graph difference.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\verification.py",
    "ast_data": "FunctionDef name:diff_report arg:self arguments arg Assign Assign Assign Call Assign Call If Compare Return return:yes Assign Call Call Call Assign Call Call For Call Call Call If Compare Call Call Call Assign Call Call Call Call Call Assign Call Call Assign Call If Call Call Call Assign Call If Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "make_adapt_function",
    "source_code": "def make_adapt_function(self):\n    if self._adapt_function is not None:\n        return self._adapt_function\n\n    def adapt_step(iterator):\n        data = next(iterator)\n        self._adapt_maybe_build(data)\n        self.update_state(data)\n    if self._steps_per_execution.numpy().item() == 1:\n        adapt_fn = adapt_step\n    else:\n\n        def adapt_fn(iterator):\n            for _ in math_ops.range(self._steps_per_execution):\n                adapt_step(iterator)\n    if not self._run_eagerly:\n        adapt_fn = def_function.function(adapt_fn)\n    self._adapt_function = adapt_fn\n    return self._adapt_function",
    "docstring": "Creates a function to execute one step of . This method can be overridden to support custom adapt logic. This method is called by . Typically, this method directly controls settings, and delegates the actual state update logic to . This function is cached the first time is called. The cache is cleared whenever is called. Returns: Function. The function created by this method should accept a , retrieve a batch, and update the state of the layer.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py",
    "ast_data": "FunctionDef name:make_adapt_function arg:self arguments arg If Compare Return return:yes FunctionDef name:adapt_step arg:iterator arguments arg Assign Call Call Call If Compare Call Call Assign FunctionDef name:adapt_fn arg:iterator arguments arg For Call Call If Assign Call Assign Return return:yes"
  },
  {
    "library": "pandas",
    "name": "InvalidColumnName",
    "source_code": "class InvalidColumnName(Warning):\n    pass",
    "docstring": "Warning raised by to_stata the column contains a non-valid stata name. Because the column name is an invalid Stata variable, the name needs to be converted. See Also -------- DataFrame.to_stata : Export DataFrame object to Stata dta format. Examples -------- >>> df = pd.DataFrame({\"0categories\": pd.Series([2, 2])}) >>> df.to_stata(\"test\") # doctest: +SKIP",
    "type": "class",
    "file_path": "pandas\\pandas\\errors\\__init__.py",
    "ast_data": "ClassDef name:InvalidColumnName"
  },
  {
    "library": "authlib",
    "name": "get_allowed_scope",
    "source_code": "def get_allowed_scope(self, scope):\n    raise NotImplementedError()",
    "docstring": "A method to return a list of requested scopes which are supported by this client. For instance, there is a `` column:: def get_allowed_scope(self, scope): if not scope: return \"\" allowed = set(scope_to_list(self.scope)) return list_to_scope([s for s in scope.split() if s in allowed]) :param scope: the requested scope. :return: string of scope",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py",
    "ast_data": "FunctionDef name:get_allowed_scope arg:self arg:scope arguments arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "is_split_variable",
    "source_code": "def is_split_variable(v):\n    return hasattr(v, '_variable_list') or hasattr(v, '_variables')",
    "docstring": "Returns True if is either a PartitionedVariable or a ShardedVariable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py",
    "ast_data": "FunctionDef name:is_split_variable arg:v arguments arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "django",
    "name": "TooManyFieldsSent",
    "source_code": "class TooManyFieldsSent(SuspiciousOperation):\n    pass",
    "docstring": "The number of fields in a GET or POST request exceeded settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.",
    "type": "class",
    "file_path": "django\\django\\core\\exceptions.py",
    "ast_data": "ClassDef name:TooManyFieldsSent"
  },
  {
    "library": "matplotlib",
    "name": "cmd",
    "source_code": "def cmd(expr: str, args: ParserElement) -> ParserElement:\n\n    def names(elt: ParserElement) -> T.Generator[str, None, None]:\n        if isinstance(elt, ParseExpression):\n            for expr in elt.exprs:\n                yield from names(expr)\n        elif elt.resultsName:\n            yield elt.resultsName\n    csname = expr.split('{', 1)[0]\n    err = csname + ''.join(('{%s}' % name for name in names(args))) if expr == csname else expr\n    return csname - (args | Error(f'Expected {err}'))",
    "docstring": "Helper to define TeX commands. `` where the names in the error message are taken from element names in *args*. If *expr* already includes arguments (e.g. \"\\cmd{arg}{...}\"), then they are stripped when constructing the parse element, but kept (and *expr* is used as is) in the error message.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:cmd arg:expr arg:args arguments arg arg FunctionDef name:names arg:elt arguments arg If Call For Call If Assign Call Assign Compare Call Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, key, value):\n    self.dicts[-1][key] = value",
    "docstring": "Set a variable in the current context",
    "type": "method",
    "file_path": "django\\django\\template\\context.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:key arg:value arguments arg arg arg Assign"
  },
  {
    "library": "pytorch",
    "name": "add_done_callback",
    "source_code": "def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None:\n    super().add_done_callback(callback)",
    "docstring": "Append the given callback function to this `valuethenthenthen`, or through other code in the callback, error handling must be carefully taken care of. For example, if this callback later completes additional futures, those futures are not marked as completed with an error and the user is responsible for handling completion/waiting on those futures independently. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> def callback(fut): ... print(\"This will run after the future has finished.\") ... print(fut.wait()) >>> fut = torch.futures.Future() >>> fut.add_done_callback(callback) >>> fut.set_result(5) This will run after the future has finished. 5",
    "type": "method",
    "file_path": "pytorch\\torch\\futures\\__init__.py",
    "ast_data": "FunctionDef name:add_done_callback arg:self arg:callback arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "random_normal",
    "source_code": "def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):\n    if self.seed:\n        op = stateless_random_ops.stateless_random_normal\n    else:\n        op = random_ops.random_normal\n    return op(shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)",
    "docstring": "A deterministic random normal if seed is passed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py",
    "ast_data": "FunctionDef name:random_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arguments arg arg arg arg arg If Assign Assign Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "laf_from_center_scale_ori",
    "source_code": "def laf_from_center_scale_ori(xy: Tensor, scale: Optional[Tensor]=None, ori: Optional[Tensor]=None) -> Tensor:\n    KORNIA_CHECK_SHAPE(xy, ['B', 'N', '2'])\n    device = xy.device\n    dtype = xy.dtype\n    B, N = xy.shape[:2]\n    if scale is None:\n        scale = torch.ones(B, N, 1, 1, device=device, dtype=dtype)\n    if ori is None:\n        ori = zeros(B, N, 1, device=device, dtype=dtype)\n    KORNIA_CHECK_SHAPE(scale, ['B', 'N', '1', '1'])\n    KORNIA_CHECK_SHAPE(ori, ['B', 'N', '1'])\n    unscaled_laf = concatenate([angle_to_rotation_matrix(ori.squeeze(-1)), xy.unsqueeze(-1)], dim=-1)\n    laf = scale_laf(unscaled_laf, scale)\n    return laf",
    "docstring": "Create a LAF from keypoint center, scale and orientation. Useful to create kornia LAFs from OpenCV keypoints. Args: xy: :math:. scale: :math:. If not provided, scale = 1.0 is assumed ori: angle in degrees :math:. If not provided orientation = 0 is assumed Returns: LAF :math:",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:laf_from_center_scale_ori arg:xy arg:scale arg:ori arguments arg arg arg Call Assign Assign Assign If Compare Assign Call If Compare Assign Call Call Call Assign Call Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_load_checkpoint",
    "source_code": "def should_load_checkpoint():\n    return dc_context.get_current_worker_context().experimental_should_init",
    "docstring": "Returns whether the current worker should load checkpoints. In multi-worker training, if loading checkpoint is requested by user, or needed for fault-tolerance, the cluster should load checkpoint but not necessarily every worker in the cluster should. Returns: Whether this particular worker in the cluster should load checkpoints.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py",
    "ast_data": "FunctionDef name:should_load_checkpoint arguments Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "Accent",
    "source_code": "class Accent(Char):\n\n    def _update_metrics(self) -> None:\n        metrics = self._metrics = self.fontset.get_metrics(self.font, self.font_class, self.c, self.fontsize, self.dpi)\n        self.width = metrics.xmax - metrics.xmin\n        self.height = metrics.ymax - metrics.ymin\n        self.depth = 0\n\n    def shrink(self) -> None:\n        super().shrink()\n        self._update_metrics()\n\n    def render(self, output: Output, x: float, y: float) -> None:\n        self.fontset.render_glyph(output, x - self._metrics.xmin, y + self._metrics.ymin, self.font, self.font_class, self.c, self.fontsize, self.dpi)",
    "docstring": "The font metrics need to be dealt with differently for accents, since they are already offset correctly from the baseline in TrueType fonts.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "ClassDef name:Accent FunctionDef name:_update_metrics arg:self arguments arg Assign Call Assign Assign Assign FunctionDef name:shrink arg:self arguments arg Call Call Call FunctionDef name:render arg:self arg:output arg:x arg:y arguments arg arg arg arg Call"
  },
  {
    "library": "kornia",
    "name": "dx_project_points_orthographic",
    "source_code": "def dx_project_points_orthographic(points_in_camera: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(points_in_camera, ['*', '3'])\n    return ops.ones_like(points_in_camera[..., 0:1])",
    "docstring": "Compute the derivative of the x projection with respect to the x coordinate. .. math:: \\frac{\\partial u}{\\partial x} = 1 Args: points_in_camera: Tensor representing the points to project. Returns: Tensor representing the derivative of the x projection with respect to the x coordinate. Example: >>> points = torch.tensor([1., 2., 3.]) >>> dx_project_points_orthographic(points) tensor([1.])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\projection_orthographic.py",
    "ast_data": "FunctionDef name:dx_project_points_orthographic arg:points_in_camera arguments arg Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "TriRefiner",
    "source_code": "class TriRefiner:\n\n    def __init__(self, triangulation):\n        _api.check_isinstance(Triangulation, triangulation=triangulation)\n        self._triangulation = triangulation",
    "docstring": "Abstract base class for classes implementing mesh refinement. A TriRefiner encapsulates a Triangulation object and provides tools for mesh refinement and interpolation. Derived classes must implement: - `~matplotlib.tri.TriInterpolator`, - the other optional keyword arguments *kwargs* are defined in each TriRefiner concrete implementation; and which returns (as a tuple) a refined triangular mesh and the interpolated values of the field at the refined triangulation nodes.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trirefine.py",
    "ast_data": "ClassDef name:TriRefiner FunctionDef name:__init__ arg:self arg:triangulation arguments arg arg Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "as_dimension",
    "source_code": "def as_dimension(value):\n    if isinstance(value, Dimension):\n        return value\n    else:\n        return Dimension(value)",
    "docstring": "Converts the given value to a Dimension. A Dimension input will be returned unmodified. An input of will be converted to an unknown Dimension. An integer input will be converted to a Dimension with that value. Args: value: The value to be converted. Returns: A Dimension corresponding to the given value.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:as_dimension arg:value arguments arg If Call Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "variation",
    "source_code": "@_axis_nan_policy_factory(lambda x: x, n_outputs=1, result_to_tuple=lambda x, _: (x,))\ndef variation(a, axis=0, nan_policy='propagate', ddof=0, *, keepdims=False):\n    xp = array_namespace(a)\n    a = xp.asarray(a)\n    if axis is None:\n        a = xp.reshape(a, (-1,))\n        axis = 0\n    n = xp.asarray(_length_nonmasked(a, axis=axis), dtype=a.dtype)\n    with np.errstate(divide='ignore', invalid='ignore'), warnings.catch_warnings():\n        warnings.simplefilter('ignore')\n        mean_a = xp.mean(a, axis=axis)\n        std_a = xp.std(a, axis=axis)\n        correction = (n / (n - ddof)) ** 0.5\n        result = std_a * correction / mean_a\n\n    def special_case(std_a, mean_a):\n        return xp.where(std_a > 0, xp.copysign(xp.inf, mean_a), xp.nan)\n    result = xpx.apply_where(ddof == n, (std_a, mean_a), special_case, fill_value=result)\n    return result[()] if result.ndim == 0 else result",
    "docstring": "Compute the coefficient of variation. The coefficient of variation is the standard deviation divided by the mean. This function is equivalent to:: np.std(x, axis=axis, ddof=ddof) / np.mean(x) The default for `addofddof` values: >>> x = np.array([[ 10.0, np.nan, 11.0, 19.0, 23.0, 29.0, 98.0], ... [ 29.0, 30.0, 32.0, 33.0, 35.0, 56.0, 57.0], ... [np.nan, np.nan, 12.0, 13.0, 16.0, 16.0, 17.0]]) >>> variation(x, axis=1, ddof=1, nan_policy='omit') array([1.05109361, 0.31428986, 0.146483 ])",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_variation.py",
    "ast_data": "FunctionDef name:variation arg:a arg:axis arg:nan_policy arg:ddof arguments arg arg arg arg arg Assign Call Assign Call If Compare Assign Call Assign Assign Call Call With Call Call Call Assign Call Assign Call Assign Assign FunctionDef name:special_case arg:std_a arg:mean_a arguments arg arg Return return:yes Call Compare Call Assign Call Compare Return return:yes Compare Call arguments arg arguments arg arg"
  },
  {
    "library": "pandas",
    "name": "all_none",
    "source_code": "def all_none(*args) -> bool:\n    return all((arg is None for arg in args))",
    "docstring": "Returns a boolean indicating if all arguments are None.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\common.py",
    "ast_data": "FunctionDef name:all_none arguments arg Return return:yes Call Compare"
  },
  {
    "library": "django",
    "name": "coord_seq",
    "source_code": "@property\ndef coord_seq(self):\n    if self.has_cs:\n        return self._cs.clone()",
    "docstring": "Return a clone of the coordinate sequence for this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:coord_seq arg:self arguments arg If Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_GetBatchIndices",
    "source_code": "def _GetBatchIndices(params_shape, indices, batch_dims):\n    batch_indices = indices\n    indices_dtype = indices.dtype.base_dtype\n    casted_params_shape = math_ops.cast(params_shape, indices_dtype)\n    accum_dim_value = array_ops.ones((), dtype=indices_dtype)\n    for dim in range(batch_dims, 0, -1):\n        dim_value = casted_params_shape[dim - 1]\n        accum_dim_value *= casted_params_shape[dim]\n        start = array_ops.zeros((), dtype=indices_dtype)\n        step = array_ops.ones((), dtype=indices_dtype)\n        dim_indices = math_ops.range(start, dim_value, step)\n        dim_indices *= accum_dim_value\n        dim_shape = array_ops.concat([array_ops.tile([1], [dim - 1]), [dim_value], array_ops.tile([1], [array_ops.rank(indices) - dim])], axis=0)\n        batch_indices += array_ops.reshape(dim_indices, dim_shape)\n    return batch_indices",
    "docstring": "Addds the batch offsets to the given indices and returns the results.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_GetBatchIndices arg:params_shape arg:indices arg:batch_dims arguments arg arg arg Assign Assign Assign Call Assign Call For Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__init__",
    "source_code": "def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None:\n    import_optional_dependency('odf')\n    super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs)",
    "docstring": "Read tables out of OpenDocument formatted files. Parameters ---------- filepath_or_buffer : str, path to be parsed or an open readable stream. {storage_options} engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine.",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\excel\\_odfreader.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:filepath_or_buffer arg:storage_options arg:engine_kwargs arguments arg arg arg arg Call Call Call"
  },
  {
    "library": "pandas",
    "name": "maybe_box_datetimelike",
    "source_code": "def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None=None) -> Scalar:\n    if dtype == _dtype_obj:\n        pass\n    elif isinstance(value, (np.datetime64, dt.datetime)):\n        value = Timestamp(value)\n    elif isinstance(value, (np.timedelta64, dt.timedelta)):\n        value = Timedelta(value)\n    return value",
    "docstring": "Cast scalar to Timestamp or Timedelta if scalar is datetime-like and dtype is not object. Parameters ---------- value : scalar dtype : Dtype, optional Returns ------- scalar",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\cast.py",
    "ast_data": "FunctionDef name:maybe_box_datetimelike arg:value arg:dtype arguments arg arg If Compare If Call Assign Call If Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_print_verbose_msg_init_beg",
    "source_code": "def _print_verbose_msg_init_beg(self, n_init):\n    if self.verbose == 1:\n        print('Initialization %d' % n_init)\n    elif self.verbose >= 2:\n        print('Initialization %d' % n_init)\n        self._init_prev_time = time()\n        self._iter_prev_time = self._init_prev_time",
    "docstring": "Print verbose message on initialization.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:_print_verbose_msg_init_beg arg:self arg:n_init arguments arg arg If Compare Call If Compare Call Assign Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "get_cell",
    "source_code": "def get_cell(self, *labels):\n    if len(labels) != self._label_length:\n        raise ValueError('The {} expects taking {} labels'.format(self._metric_name, self._label_length))\n    return self._metric_methods[self._label_length].get_cell(self._metric, *labels)",
    "docstring": "Retrieves the cell.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:get_cell arg:self arguments arg arg If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "local_devices",
    "source_code": "@tf_export('experimental.dtensor.local_devices', v1=[])\ndef local_devices(device_type: str, for_client_id: Optional[int]=None) -> List[tf_device.DeviceSpec]:\n    if device_type.upper() not in ['CPU', 'GPU', 'TPU']:\n        raise ValueError(f'Device type {device_type} is not CPU, GPU, or TPU.')\n    if for_client_id is None:\n        for_client_id = client_id()\n    return [tf_device.DeviceSpec(job=job_name(), replica=0, task=for_client_id, device_type=device_type, device_index=i) for i in range(num_local_devices(device_type))]",
    "docstring": "Returns a list of device specs configured on this client.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py",
    "ast_data": "FunctionDef name:local_devices arg:device_type arg:for_client_id arguments arg arg If Compare Call Raise Call If Compare Assign Call Return return:yes Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "_to_timedeltaarray",
    "source_code": "def _to_timedeltaarray(self) -> TimedeltaArray:\n    from pandas.core.arrays.timedeltas import TimedeltaArray\n    pa_type = self._pa_array.type\n    assert pa.types.is_duration(pa_type)\n    np_dtype = np.dtype(f'm8[{pa_type.unit}]')\n    np_array = self._pa_array.to_numpy()\n    np_array = np_array.astype(np_dtype, copy=False)\n    return TimedeltaArray._simple_new(np_array, dtype=np_dtype)",
    "docstring": "Convert a pyarrow duration typed array to a TimedeltaArray.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py",
    "ast_data": "FunctionDef name:_to_timedeltaarray arg:self arguments arg Assign Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "map_array",
    "source_code": "def map_array(arr: ArrayLike, mapper, na_action: Literal['ignore'] | None=None) -> np.ndarray | ExtensionArray | Index:\n    from pandas import Index\n    if na_action not in (None, 'ignore'):\n        msg = f\"na_action must either be 'ignore' or None, {na_action} was passed\"\n        raise ValueError(msg)\n    if is_dict_like(mapper):\n        if isinstance(mapper, dict) and hasattr(mapper, '__missing__'):\n            dict_with_default = mapper\n            mapper = lambda x: dict_with_default[np.nan if isinstance(x, float) and np.isnan(x) else x]\n        else:\n            from pandas import Series\n            if len(mapper) == 0:\n                mapper = Series(mapper, dtype=np.float64)\n            elif isinstance(mapper, dict):\n                mapper = Series(mapper.values(), index=Index(mapper.keys(), tupleize_cols=False))\n            else:\n                mapper = Series(mapper)\n    if isinstance(mapper, ABCSeries):\n        if na_action == 'ignore':\n            mapper = mapper[mapper.index.notna()]\n        indexer = mapper.index.get_indexer(arr)\n        new_values = take_nd(mapper._values, indexer)\n        return new_values\n    if not len(arr):\n        return arr.copy()\n    values = arr.astype(object, copy=False)\n    if na_action is None:\n        return lib.map_infer(values, mapper)\n    else:\n        return lib.map_infer_mask(values, mapper, mask=isna(values).view(np.uint8))",
    "docstring": "Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[ndarray, Index, ExtensionArray] The output of the mapping function applied to the array. If the function returns a tuple with more than one element a MultiIndex will be returned.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\algorithms.py",
    "ast_data": "FunctionDef name:map_array arg:arr arg:mapper arg:na_action arguments arg arg arg If Compare Assign Raise Call If Call If BoolOp Call Call Assign Assign arguments arg BoolOp Call Call If Compare Call Assign Call If Call Assign Call Call Call Call Assign Call If Call If Compare Assign Call Assign Call Assign Call Return return:yes If Call Return return:yes Call Assign Call If Compare Return return:yes Call Return return:yes Call Call Call"
  },
  {
    "library": "kornia",
    "name": "cameras_for_ids",
    "source_code": "def cameras_for_ids(cameras: PinholeCamera, camera_ids: Union[List[int], Tensor]) -> PinholeCamera:\n    intrinsics = cameras.intrinsics[camera_ids]\n    extrinsics = cameras.extrinsics[camera_ids]\n    height = cameras.height[camera_ids]\n    width = cameras.width[camera_ids]\n    return PinholeCamera(intrinsics, extrinsics, height, width)",
    "docstring": "Take a PinholeCamera camera and camera indices to create a new PinholeCamera for requested cameras. Args: cameras: Scene camera object: PinholeCamera camera_ids: List of camera indices to copy: List[int] Return: A new PinholeCamera object with a sub-set of cameras: PinholeCamera",
    "type": "function",
    "file_path": "kornia\\kornia\\nerf\\camera_utils.py",
    "ast_data": "FunctionDef name:cameras_for_ids arg:cameras arg:camera_ids arguments arg arg Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "finish",
    "source_code": "@abc.abstractmethod\ndef finish(self):\n    pass",
    "docstring": "Finish any processing for writing the movie.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:finish arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_is_self_handle",
    "source_code": "def _is_self_handle(self, x):\n    if isinstance(x, ops.EagerTensor):\n        return x is self._handle\n    return x.op.type == 'MutexV2' and x.op.get_attr('shared_name') and (x.op.get_attr('shared_name') == self._handle.op.get_attr('shared_name')) and (x.op.device == self._handle.op.device or _get_colocation(x.op) == _get_colocation(self._handle.op))",
    "docstring": "Check if the tensor is the same Mutex as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\critical_section_ops.py",
    "ast_data": "FunctionDef name:_is_self_handle arg:self arg:x arguments arg arg If Call Return return:yes Compare Return return:yes BoolOp Compare Call Compare Call Call BoolOp Compare Compare Call Call"
  },
  {
    "library": "pytorch",
    "name": "set_tensors_dict",
    "source_code": "def set_tensors_dict(self, named_tensors: dict[str, torch.Tensor]) -> None:\n    for name, value in named_tensors.items():\n        self.set_tensor(name, value)",
    "docstring": "Set the attributes specified by the given paths to values. For example, to set the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.set_tensors_dict({ \"layer1.conv1.weight\": weight, \"layer1.conv1.bias\": bias, })",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py",
    "ast_data": "FunctionDef name:set_tensors_dict arg:self arg:named_tensors arguments arg arg For Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_show_part_tensor",
    "source_code": "def _show_part_tensor(tensor):\n    return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE, tensor, tensor)",
    "docstring": "Trace function for printing part of the tensor.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_show_part_tensor arg:tensor arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_calculate_meta_reordering_scatter_offsets",
    "source_code": "def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device):\n    dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols)\n    dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1)\n    group = 32 if meta_dtype.itemsize == 2 else 16\n    interweave = 4 if meta_dtype.itemsize == 2 else 2\n    dst_rows = dst_rows // group * group + dst_rows % 8 * interweave + dst_rows % group // 8\n    topright = ((dst_rows % 2 == 0) & (dst_cols % 2 == 1)).to(torch.int8)\n    bottomleft = ((dst_rows % 2 == 1) & (dst_cols % 2 == 0)).to(torch.int8)\n    dst_rows += topright - bottomleft\n    dst_cols -= topright - bottomleft\n    interleave = 2\n    cols_maj = dst_cols // interleave\n    cols_min = dst_cols % interleave\n    return (cols_maj * m * interleave + dst_rows * interleave + cols_min).view(-1)",
    "docstring": "This is PyTorch implementation of main part of reorder_meta() function, from tools/util/include/cutlass/util/host_reorder.h file of CUTLASS source tree. Furthermore, CUTLASS template for sparse GEMM decides upon layout of this matrix, and at the moment for the sparse GEMM executed on tensor cores, this is layout described by ColumnMajorInterleaved data structure, in include/cutlass/layout/matrix.h of CUTLASS source tree. The reordering of meta matrix into meta_reordered matrix calculated according to these segments of CUTLASS code is re-implemented here. Note that this calculation produces offsets for scattering metadata matrix elements into reordered metadata matrix elements (or, equivalently, for gathering reordered metadata matrix element back into metadata matrix elements).",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\_semi_structured_conversions.py",
    "ast_data": "FunctionDef name:_calculate_meta_reordering_scatter_offsets arg:m arg:meta_ncols arg:meta_dtype arg:device arguments arg arg arg arg Assign Call Call Assign Call Call Assign Compare Assign Compare Assign Assign Call Compare Compare Assign Call Compare Compare Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "from_string_handle",
    "source_code": "@staticmethod\ndef from_string_handle(string_handle, output_types, output_shapes=None, output_classes=None):\n    output_types = nest.map_structure(dtypes.as_dtype, output_types)\n    if output_shapes is None:\n        output_shapes = nest.map_structure(lambda _: tensor_shape.TensorShape(None), output_types)\n    else:\n        output_shapes = nest.map_structure_up_to(output_types, tensor_shape.as_shape, output_shapes)\n    if output_classes is None:\n        output_classes = nest.map_structure(lambda _: tensor.Tensor, output_types)\n    nest.assert_same_structure(output_types, output_shapes)\n    output_structure = structure.convert_legacy_structure(output_types, output_shapes, output_classes)\n    string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)\n    iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2(string_handle, output_types=structure.get_flat_tensor_types(output_structure), output_shapes=structure.get_flat_tensor_shapes(output_structure))\n    return Iterator(iterator_resource, None, output_types, output_shapes, output_classes)",
    "docstring": "Creates a new, uninitialized based on the given handle. This method allows you to define a \"feedable\" iterator where you can choose between concrete iterators by feeding a value in a call. In that case, would be a , and you would feed it with the value of in each step. For example, if you had two iterators that marked the current position in a training dataset and a test dataset, you could choose which to use in each step as follows: Args: string_handle: A scalar of type that evaluates to a handle produced by the method. output_types: A (nested) structure of objects corresponding to each component of an element of this dataset. output_shapes: (Optional.) A (nested) structure of objects corresponding to each component of an element of this dataset. If omitted, each component will have an unconstrainted shape. output_classes: (Optional.) A (nested) structure of Python objects corresponding to each component of an element of this iterator. If omitted, each component is assumed to be of type . Returns: An .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py",
    "ast_data": "FunctionDef name:from_string_handle arg:string_handle arg:output_types arg:output_shapes arg:output_classes arguments arg arg arg arg Assign Call If Compare Assign Call arguments arg Call Assign Call If Compare Assign Call arguments arg Call Assign Call Assign Call Assign Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_clone",
    "source_code": "def _clone(self, python_function):\n    f = Function(python_function=self._python_function if python_function is None else python_function, name=self._name, input_signature=self.input_signature, autograph=self._autograph, jit_compile=self._jit_compile, reduce_retracing=self._reduce_retracing, experimental_attributes=self._attributes, experimental_autograph_options=self._experimental_autograph_options)\n    if self._shared_rendezvous:\n        f._shared_rendezvous = self._shared_rendezvous\n    return f",
    "docstring": "Clone the function with different python function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py",
    "ast_data": "FunctionDef name:_clone arg:self arg:python_function arguments arg arg Assign Call Compare If Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_prepare",
    "source_code": "def _prepare(self, *args, **kwargs) -> None:\n    for config in self.groups:\n        module = config['module']\n        tensor_name = config['tensor_name']\n        parametrization = config.get('parametrization', FakeStructuredSparsity)\n        tensor = getattr(module, tensor_name)\n        mask = config.get('mask', torch.ones(tensor.shape[0], dtype=torch.bool, device=tensor.device))\n        self.state[config['tensor_fqn']]['mask'] = mask\n        parametrize.register_parametrization(module, tensor_name, parametrization(mask))\n        if isinstance(module, (nn.Linear, nn.Conv2d)):\n            prune_bias = config.get('prune_bias', True)\n            if module.bias is not None:\n                module.register_parameter('_bias', nn.Parameter(module.bias.detach()))\n                module.bias = None\n                module.prune_bias = prune_bias\n            module.register_forward_hook(BiasHook(module.parametrizations.weight[0], prune_bias))",
    "docstring": "This function will attach the FakeStructuredSparsity parameterizations and BiasHooks at the appropriate points in the model.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\base_structured_sparsifier.py",
    "ast_data": "FunctionDef name:_prepare arg:self arguments arg arg arg For Assign Assign Assign Call Assign Call Assign Call Call Assign Call Call If Call Assign Call If Compare Call Call Call Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_mapped_trackable",
    "source_code": "def get_mapped_trackable(trackable, object_map):\n    if object_map is None:\n        return trackable\n    else:\n        return object_map.get(trackable, trackable)",
    "docstring": "Returns the mapped trackable if possible, otherwise returns trackable.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\util.py",
    "ast_data": "FunctionDef name:get_mapped_trackable arg:trackable arg:object_map arguments arg arg If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "task_id",
    "source_code": "@property\ndef task_id(self):\n    return getattr(self, '_task_id', None)",
    "docstring": "Returns the task id this indicates. In TensorFlow distributed environment, each job may have an applicable task id, which is the index of the instance within its task type. This is useful when user needs to run specific code according to task index. For example, Returns if such information is not available or is not applicable in the current distributed environment, such as training with . For more information, please see 's class docstring.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py",
    "ast_data": "FunctionDef name:task_id arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_pad_dense_input",
    "source_code": "@classmethod\ndef _pad_dense_input(cls, dense_input: torch.Tensor) -> torch.Tensor:\n    assert dense_input.dim() == 2\n    m, n = dense_input.shape\n    min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_rows\n    min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_cols\n    to_pad_m = -m % min_rows if m < min_rows or m % min_rows else 0\n    to_pad_n = -n % min_cols if n < min_cols or n % min_rows else 0\n    if to_pad_m or to_pad_n:\n        return torch.nn.functional.pad(dense_input, (0, to_pad_n, 0, to_pad_m))\n    else:\n        return dense_input",
    "docstring": "Calculates padding for dense tensor and pads tensor if necessary. If padding is not required, this function returns the original tensor.",
    "type": "method",
    "file_path": "pytorch\\torch\\sparse\\semi_structured.py",
    "ast_data": "FunctionDef name:_pad_dense_input arg:cls arg:dense_input arguments arg arg Compare Call Assign Assign Assign Assign BoolOp Compare Assign BoolOp Compare If BoolOp Return return:yes Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "reduce_prod",
    "source_code": "@tf_export('math.reduce_prod', 'reduce_prod', v1=[])\n@dispatch.add_dispatch_support\ndef reduce_prod(input_tensor, axis=None, keepdims=False, name=None):\n    keepdims = False if keepdims is None else bool(keepdims)\n    return _may_reduce_to_scalar(keepdims, axis, gen_math_ops.prod(input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name))",
    "docstring": "Computes of elements across dimensions of a tensor. This is the reduction operation for the elementwise op. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. If is None, all dimensions are reduced, and a tensor with a single element is returned. For example: >>> x = tf.constant([[1., 2.], [3., 4.]]) >>> tf.math.reduce_prod(x) >>> tf.math.reduce_prod(x, 0) >>> tf.math.reduce_prod(x, 1) Args: input_tensor: The tensor to reduce. Should have numeric type. axis: The dimensions to reduce. If (the default), reduces all dimensions. Must be in the range . keepdims: If true, retains reduced dimensions with length 1. name: A name for the operation (optional). Returns: The reduced tensor. @compatibility(numpy) Equivalent to np.prod @end_compatibility",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py",
    "ast_data": "FunctionDef name:reduce_prod arg:input_tensor arg:axis arg:keepdims arg:name arguments arg arg arg arg Assign Compare Call Return return:yes Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "kleene_xor",
    "source_code": "def kleene_xor(left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None) -> tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_]]:\n    if left_mask is None:\n        return kleene_xor(right, left, right_mask, left_mask)\n    if not isinstance(left, np.ndarray):\n        raise TypeError('Either `left` or `right` need to be a np.ndarray.')\n    raise_for_nan(right, method='xor')\n    if right is libmissing.NA:\n        result = np.zeros_like(left)\n    else:\n        result = left ^ right\n    if right_mask is None:\n        if right is libmissing.NA:\n            mask = np.ones_like(left_mask)\n        else:\n            mask = left_mask.copy()\n    else:\n        mask = left_mask | right_mask\n    return (result, mask)",
    "docstring": "Boolean `leftright` value is a scalar. Returns ------- result, mask: ndarray[bool] The result of the logical xor, and the new mask.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\mask_ops.py",
    "ast_data": "FunctionDef name:kleene_xor arg:left arg:right arg:left_mask arg:right_mask arguments arg arg arg arg If Compare Return return:yes Call If Call Raise Call Call If Compare Assign Call Assign If Compare If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "promote_joins",
    "source_code": "def promote_joins(self, aliases):\n    aliases = list(aliases)\n    while aliases:\n        alias = aliases.pop(0)\n        if self.alias_map[alias].join_type is None:\n            continue\n        assert self.alias_map[alias].join_type is not None\n        parent_alias = self.alias_map[alias].parent_alias\n        parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER\n        already_louter = self.alias_map[alias].join_type == LOUTER\n        if (self.alias_map[alias].nullable or parent_louter) and (not already_louter):\n            self.alias_map[alias] = self.alias_map[alias].promote()\n            aliases.extend((join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases))",
    "docstring": "Promote recursively the join type of given aliases and its children to an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\sql\\query.py",
    "ast_data": "FunctionDef name:promote_joins arg:self arg:aliases arguments arg arg Assign Call While Assign Call If Compare Compare Assign Assign BoolOp Compare Assign Compare If BoolOp BoolOp Assign Call Call BoolOp Compare Compare"
  },
  {
    "library": "kornia",
    "name": "RgbToRaw",
    "source_code": "class RgbToRaw(Module):\n    ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1]\n    ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 1, -1, -1]\n\n    def __init__(self, cfa: CFA) -> None:\n        super().__init__()\n        self.cfa = cfa\n\n    def forward(self, image: torch.Tensor) -> torch.Tensor:\n        return rgb_to_raw(image, cfa=self.cfa)",
    "docstring": "Module to convert a RGB image to bayer raw version of image. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: reference: Example: >>> rgbinput = torch.rand(2, 3, 4, 6) >>> raw = RgbToRaw(CFA.GB) >>> output = raw(rgbinput) # 2x1x4x6",
    "type": "class",
    "file_path": "kornia\\kornia\\color\\raw.py",
    "ast_data": "ClassDef name:RgbToRaw FunctionDef name:__init__ arg:self arg:cfa arguments arg arg Call Call Assign FunctionDef name:forward arg:self arg:image arguments arg arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "brent",
    "source_code": "def brent(func, args=(), brack=None, tol=1.48e-08, full_output=0, maxiter=500):\n    options = {'xtol': tol, 'maxiter': maxiter}\n    res = _minimize_scalar_brent(func, brack, args, **options)\n    if full_output:\n        return (res['x'], res['fun'], res['nit'], res['nfev'])\n    else:\n        return res['x']",
    "docstring": "Given a function of one variable and a possible bracket, return a local minimizer of the function isolated to a fractional precision of tol. Parameters ---------- func : callable f(x,*args) Objective function. args : tuple, optional Additional arguments (if present). brack : tuple, optional Either a triple ``xa >> def f(x): ... return (x-1)**2 >>> from scipy import optimize >>> minimizer = optimize.brent(f, brack=(1, 2)) >>> minimizer 1 >>> res = optimize.brent(f, brack=(-1, 0.5, 2), full_output=True) >>> xmin, fval, iter, funcalls = res >>> f(xmin), fval (0.0, 0.0)",
    "type": "function",
    "file_path": "scipy\\scipy\\optimize\\_optimize.py",
    "ast_data": "FunctionDef name:brent arg:func arg:args arg:brack arg:tol arg:full_output arg:maxiter arguments arg arg arg arg arg arg Assign Assign Call If Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "__exit__",
    "source_code": "def __exit__(self, type: Optional[type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType]):\n    if not self._enable or type:\n        return\n    all_procs_joined = False\n    is_last_joiner = True\n    i = 0\n    WARN_THRESHOLD = 1000\n    warnings.simplefilter('once')\n    while not all_procs_joined:\n        if i > WARN_THRESHOLD:\n            warnings.warn(f'Detected uneven input skew of greater than {WARN_THRESHOLD}. This means that rank {self._rank} has at least {WARN_THRESHOLD} fewer inputs than other currently-active ranks. This level of skew could lead to performance degradation during training.')\n        num_nonjoined_procs = self._get_num_nonjoined_procs()\n        if num_nonjoined_procs == 0:\n            all_procs_joined = True\n        else:\n            if self._throw_on_early_termination:\n                self._notify_procs_to_terminate()\n            for join_hook in self._join_hooks:\n                join_hook.main_hook()\n            is_last_joiner = False\n            i += 1\n    for join_hook in self._join_hooks:\n        join_hook.post_hook(is_last_joiner)",
    "docstring": "Repeatedly runs the main hooks until all processes join; then, runs the post-hooks. Raises: RuntimeError If ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py",
    "ast_data": "FunctionDef name:__exit__ arg:self arg:type arg:value arg:traceback arguments arg arg arg arg If BoolOp Return return:no Assign Assign Assign Assign Call While If Compare Call Assign Call If Compare Assign If Call For Call Assign For Call"
  },
  {
    "library": "matplotlib",
    "name": "get_tick_params",
    "source_code": "def get_tick_params(self, which='major'):\n    _api.check_in_list(['major', 'minor'], which=which)\n    if which == 'major':\n        return self._translate_tick_params(self._major_tick_kw, reverse=True)\n    return self._translate_tick_params(self._minor_tick_kw, reverse=True)",
    "docstring": "Get appearance parameters for ticks, ticklabels, and gridlines. .. versionadded:: 3.7 Parameters ---------- which : {'major', 'minor'}, default: 'major' The group of ticks for which the parameters are retrieved. Returns ------- dict Properties for styling tick elements added to the axis. Notes ----- This method returns the appearance parameters for styling *new* elements added to this axis and may be different from the values on current elements if they were modified directly by the user (e.g., via `` methods on individual tick objects). Examples -------- :: >>> ax.yaxis.set_tick_params(labelsize=30, labelcolor='red', ... direction='out', which='major') >>> ax.yaxis.get_tick_params(which='major') {'direction': 'out', 'left': True, 'right': False, 'labelleft': True, 'labelright': False, 'gridOn': False, 'labelsize': 30, 'labelcolor': 'red'} >>> ax.yaxis.get_tick_params(which='minor') {'left': True, 'right': False, 'labelleft': True, 'labelright': False, 'gridOn': False}",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:get_tick_params arg:self arg:which arguments arg arg Call If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "validate_attr",
    "source_code": "def validate_attr(self, append) -> None:\n    if append:\n        existing_fields = getattr(self.attrs, self.kind_attr, None)\n        if existing_fields is not None and existing_fields != list(self.values):\n            raise ValueError('appended items do not match existing items in table!')\n        existing_dtype = getattr(self.attrs, self.dtype_attr, None)\n        if existing_dtype is not None and existing_dtype != self.dtype:\n            raise ValueError('appended items dtype do not match existing items dtype in table!')",
    "docstring": "validate that we have the same order as the existing & same dtype",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_attr arg:self arg:append arguments arg arg If Assign Call If BoolOp Compare Compare Call Raise Call Assign Call If BoolOp Compare Compare Raise Call"
  },
  {
    "library": "pytorch",
    "name": "RepeatedExpr",
    "source_code": "class RepeatedExpr(PatternExpr):\n\n    def __init__(self, inner_pattern: _TargetExpr) -> None:\n        super().__init__()\n        self.inner_pattern = inner_pattern\n        self.op = inner_pattern.op\n\n    @property\n    def fns(self) -> Sequence[FnsType]:\n        return self.inner_pattern.fns\n\n    def _match(self, node: torch.fx.Node, ctx: MatchContext) -> MatchResult:\n        m = ctx.match(self.inner_pattern, node)\n        if not is_match(m):\n            return m\n        ctx.pattern_to_node.pop(self.inner_pattern)\n        for anchor_node in self.inner_pattern.find_anchor_nodes(ctx, OrderedSet()):\n            anchor_m = MatchContext([self], graph=node.graph).match(self.inner_pattern, anchor_node)\n            if not is_match(anchor_m):\n                return anchor_m\n            m.extend(anchor_m)\n        return m\n\n    def pattern_eq(self, other: Any) -> bool:\n        other = typing.cast(Self, other)\n        return super().pattern_eq(other) and self.inner_pattern.pattern_eq(other.inner_pattern)",
    "docstring": "Checks for a repeated pattern. Useful for repeated operations after a node such as or",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py",
    "ast_data": "ClassDef name:RepeatedExpr FunctionDef name:__init__ arg:self arg:inner_pattern arguments arg arg Call Call Assign Assign FunctionDef name:fns arg:self arguments arg Return return:yes FunctionDef name:_match arg:self arg:node arg:ctx arguments arg arg arg Assign Call If Call Return return:yes Call For Call Call Assign Call Call If Call Return return:yes Call Return return:yes FunctionDef name:pattern_eq arg:self arg:other arguments arg arg Assign Call Return return:yes BoolOp Call Call Call"
  },
  {
    "library": "pandas",
    "name": "set_info",
    "source_code": "def set_info(self, info) -> None:\n    idx = info.get(self.name)\n    if idx is not None:\n        self.__dict__.update(idx)",
    "docstring": "set my state from the passed info",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:set_info arg:self arg:info arguments arg arg Assign Call If Compare Call"
  },
  {
    "library": "scipy",
    "name": "diff_files",
    "source_code": "def diff_files(sha):\n    res = subprocess.run(['git', 'diff', '--name-only', '--diff-filter=ACMR', '-z', sha, '--', '*.py', '*.pyx', '*.pxd', '*.pxi'], stdout=subprocess.PIPE, encoding='utf-8')\n    res.check_returncode()\n    return [f for f in res.stdout.split('\\x00') if f]",
    "docstring": "Find the diff since the given SHA.",
    "type": "function",
    "file_path": "scipy\\tools\\lint.py",
    "ast_data": "FunctionDef name:diff_files arg:sha arguments arg Assign Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "normalize_pixel_coordinates",
    "source_code": "def normalize_pixel_coordinates(pixel_coordinates: Tensor, height: int, width: int, eps: float=1e-08) -> Tensor:\n    if pixel_coordinates.shape[-1] != 2:\n        raise ValueError(f'Input pixel_coordinates must be of shape (*, 2). Got {pixel_coordinates.shape}')\n    hw: Tensor = stack([tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype)])\n    factor: Tensor = tensor(2.0, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps)\n    return factor * pixel_coordinates - 1",
    "docstring": "Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates: the grid with pixel coordinates. Shape can be :math:. width: the maximum width in the x-axis. height: the maximum height in the y-axis. eps: safe division by zero. Return: the normalized pixel coordinates with shape :math:. Examples: >>> coords = tensor([[50., 100.]]) >>> normalize_pixel_coordinates(coords, 100, 50) tensor([[1.0408, 1.0202]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\conversions.py",
    "ast_data": "FunctionDef name:normalize_pixel_coordinates arg:pixel_coordinates arg:height arg:width arg:eps arguments arg arg arg arg If Compare Raise Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "validate",
    "source_code": "def validate(self, value):\n    super().validate(value)\n    if value and (not self.valid_value(value)):\n        raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value})",
    "docstring": "Validate that the input is in self.choices.",
    "type": "method",
    "file_path": "django\\django\\forms\\fields.py",
    "ast_data": "FunctionDef name:validate arg:self arg:value arguments arg arg Call Call If BoolOp Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "resolve_shape_to_proxy",
    "source_code": "def resolve_shape_to_proxy(shape: list[Union[int, torch.SymInt]], bound_symbols: dict[Any, Any]):\n    from torch.utils._sympy.interp import sympy_interp\n    from torch.utils._sympy.reference import PythonReferenceAnalysis\n    ret = []\n    for s in shape:\n        if isinstance(s, torch.SymInt):\n            ret.append(sympy_interp(PythonReferenceAnalysis, bound_symbols, s.node.expr))\n        else:\n            assert isinstance(s, int)\n            ret.append(s)\n    return ret",
    "docstring": "Given a list of symints/ints, this function returns a calculated expression of bound_symbols' values. When we trace this function, we'll get a graph with call_function nodes that describes how the shape expr is computed from bound_symbols' values. Suppose shape = (s1*s2, s1+s2) and bound_symbols = {s1: arg0, s2: arg1}, the result will be (arg0 * arg1, arg0 + arg1).",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py",
    "ast_data": "FunctionDef name:resolve_shape_to_proxy arg:shape arg:bound_symbols arguments arg arg Assign For If Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_type",
    "source_code": "def get_type(value):\n    if isinstance(value, type_spec.TypeSpec):\n        return value.value_type()\n    else:\n        return type(value)",
    "docstring": "Returns the type of if it is a TypeSpec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:get_type arg:value arguments arg If Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "write_text",
    "source_code": "def write_text(text: str) -> str:\n    return write(text, 'txt')[1]",
    "docstring": "Write the to a file and return the path computed based on the hash.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\codecache.py",
    "ast_data": "FunctionDef name:write_text arg:text arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "weekend_to_monday",
    "source_code": "def weekend_to_monday(dt: datetime) -> datetime:\n    if dt.weekday() == 6:\n        return dt + timedelta(1)\n    elif dt.weekday() == 5:\n        return dt + timedelta(2)\n    return dt",
    "docstring": "If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe",
    "type": "function",
    "file_path": "pandas\\pandas\\tseries\\holiday.py",
    "ast_data": "FunctionDef name:weekend_to_monday arg:dt arguments arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "Norm",
    "source_code": "@dataclass\nclass Norm(Move):\n    func: Union[Callable, str] = 'max'\n    where: Optional[str] = None\n    by: Optional[list[str]] = None\n    percent: bool = False\n    group_by_orient: ClassVar[bool] = False\n\n    def _norm(self, df, var):\n        if self.where is None:\n            denom_data = df[var]\n        else:\n            denom_data = df.query(self.where)[var]\n        df[var] = df[var] / denom_data.agg(self.func)\n        if self.percent:\n            df[var] = df[var] * 100\n        return df\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        other = {'x': 'y', 'y': 'x'}[orient]\n        return groupby.apply(data, self._norm, other)",
    "docstring": "Divisive scaling on the value axis after aggregating within groups. Parameters ---------- func : str or callable Function called on each group to define the comparison value. where : str Query string defining the subset used to define the comparison values. by : list of variables Variables used to define aggregation groups. percent : bool If True, multiply the result by 100. Examples -------- .. include:: ../docstrings/objects.Norm.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\moves.py",
    "ast_data": "ClassDef name:Norm FunctionDef name:_norm arg:self arg:df arg:var arguments arg arg arg If Compare Assign Assign Call Assign Call If Assign Return return:yes FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "clear_runtime_states",
    "source_code": "def clear_runtime_states(self) -> None:\n    self.fwd_cache.clear()\n    self.output_chunks.clear()\n    for recv_tuple in self.args_recv_info.values():\n        for a in recv_tuple:\n            if isinstance(a, _RecvInfo):\n                a.buffer.grad = None",
    "docstring": "Clear runtime states of the stage.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py",
    "ast_data": "FunctionDef name:clear_runtime_states arg:self arguments arg Call Call For Call For If Call Assign"
  },
  {
    "library": "tensorflow",
    "name": "string_table",
    "source_code": "def string_table(self):\n    return self._string_table",
    "docstring": "Returns a list of strings to store in pprof's string_table.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py",
    "ast_data": "FunctionDef name:string_table arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._vars[0].dtype",
    "docstring": "The dtype of all s in this object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_replicated_variable.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "wait_for",
    "source_code": "async def wait_for(self, signal):\n    d = Deferred()\n\n    def handle():\n        self.disconnect(handle, signal)\n        d.callback(None)\n    self.connect(handle, signal)\n    await maybe_deferred_to_future(d)",
    "docstring": "Await the next *signal*. See :ref: for an example.",
    "type": "method",
    "file_path": "scrapy\\scrapy\\signalmanager.py",
    "ast_data": "AsyncFunctionDef name:wait_for arg:self arg:signal arguments arg arg Assign Call FunctionDef name:handle arguments Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_on_device_or_primary",
    "source_code": "def _get_on_device_or_primary(self):\n    if values_util.is_saving_non_distributed():\n        return self._primary\n    replica_id = values_util.get_current_replica_id_as_int()\n    if replica_id is None:\n        current_device = device_util.canonicalize(device_util.current())\n        for i, value in enumerate(self._values):\n            if device_util.canonicalize(value.device) == current_device:\n                return self._get_replica(i)\n        return self._get_replica(0)\n    else:\n        return self._get_replica(replica_id)",
    "docstring": "Returns value in same replica or device if possible, else the _primary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_get_on_device_or_primary arg:self arguments arg If Call Return return:yes Assign Call If Compare Assign Call Call For Call If Compare Call Return return:yes Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "double",
    "source_code": "def double(self) -> Self:\n    return self._apply(lambda t: t.double() if t.is_floating_point() else t)",
    "docstring": "Casts all floating point parameters and buffers to `` datatype. .. note:: This method modifies the module in-place. Returns: Module: self",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:double arg:self arguments arg Return return:yes Call arguments arg Call Call"
  },
  {
    "library": "pytorch",
    "name": "_OutOfProcessFxCompile",
    "source_code": "class _OutOfProcessFxCompile(_SerializedFxCompile):\n\n    @override\n    @final\n    def _send_to_child(self, pickled_input: _WireProtocolPickledInput) -> _WireProtocolPickledOutput:\n        f = self._send_to_child_async(pickled_input)\n        return f.result()\n\n    @abstractmethod\n    def _send_to_child_async(self, pickled_input: _WireProtocolPickledInput) -> Future[_WireProtocolPickledOutput]:\n        ...\n\n    def _postprocess(self, output: _WireProtocolOutput) -> None:\n        CachedMetricsHelper.apply_deltas(output.metrics)\n        if GraphLowering.save_output_code is not None:\n            GraphLowering.save_output_code(output.graph.source_code)\n\n        @functools.lru_cache(None)\n        def getLogger(name: str) -> logging.Logger:\n            return logging.getLogger(name)\n        if output.warning_replay:\n            for w in output.warning_replay:\n                warnings.warn_explicit(message=w.message, category=w.category, filename=w.filename, lineno=w.lineno, source=w.source)\n        for record in output.logs:\n            logger = getLogger(record.name)\n            logger.handle(record)",
    "docstring": "Represents an FxCompile which is run outside the current process (in either a subprocess or possibly even a separate machine).",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py",
    "ast_data": "ClassDef name:_OutOfProcessFxCompile FunctionDef name:_send_to_child arg:self arg:pickled_input arguments arg arg Assign Call Return return:yes Call FunctionDef name:_send_to_child_async arg:self arg:pickled_input arguments arg arg FunctionDef name:_postprocess arg:self arg:output arguments arg arg Call If Compare Call FunctionDef name:getLogger arg:name arguments arg Return return:yes Call Call If For Call For Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "solvevec",
    "source_code": "def solvevec(self, rhs, adjoint=False, name='solve'):\n    with self._name_scope(name):\n        block_dimensions = self._block_domain_dimensions() if adjoint else self._block_range_dimensions()\n        if linear_operator_util.arg_is_blockwise(block_dimensions, rhs, -1):\n            for i, block in enumerate(rhs):\n                if not isinstance(block, linear_operator.LinearOperator):\n                    block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n                    self._check_input_dtype(block)\n                    block_dimensions[i].assert_is_compatible_with(block.shape[-1])\n                    rhs[i] = block\n            rhs_mat = [array_ops.expand_dims(block, axis=-1) for block in rhs]\n            solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n            return [array_ops.squeeze(x, axis=-1) for x in solution_mat]\n        rhs = tensor_conversion.convert_to_tensor_v2_with_dispatch(rhs, name='rhs')\n        self._check_input_dtype(rhs)\n        op_dimension = self.domain_dimension if adjoint else self.range_dimension\n        op_dimension.assert_is_compatible_with(rhs.shape[-1])\n        rhs_mat = array_ops.expand_dims(rhs, axis=-1)\n        solution_mat = self.solve(rhs_mat, adjoint=adjoint)\n        return array_ops.squeeze(solution_mat, axis=-1)",
    "docstring": "Solve single equation with best effort: . The returned will be close to an exact solution if is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: Args: rhs: with same as this operator, or list of s (for blockwise operators). s are treated as [batch] vectors, meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility regarding batch dimensions. adjoint: Python . If , solve the system involving the adjoint of this : . name: A name scope to use for ops added by this method. Returns: with shape and same as . Raises: NotImplementedError: If or is False.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_block_diag.py",
    "ast_data": "FunctionDef name:solvevec arg:self arg:rhs arg:adjoint arg:name arguments arg arg arg arg With Call Assign Call Call If Call For Call If Call Assign Call Call Call Assign Assign Call Assign Call Return return:yes Call Assign Call Call Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "bind_with_defaults",
    "source_code": "def bind_with_defaults(self, args, kwargs, default_values):\n    bound_arguments = self.bind(*args, **kwargs)\n    bound_arguments.apply_defaults()\n    with_default_args = collections.OrderedDict()\n    for name, value in bound_arguments.arguments.items():\n        if value is CAPTURED_DEFAULT_VALUE:\n            with_default_args[name] = default_values[name]\n        else:\n            with_default_args[name] = value\n    for arg_name in with_default_args:\n        constraint = self.parameters[arg_name].type_constraint\n        if constraint:\n            with_default_args[arg_name] = constraint.cast(with_default_args[arg_name], trace_type.InternalCastContext(allow_specs=True))\n    bound_arguments = inspect.BoundArguments(self, with_default_args)\n    return bound_arguments",
    "docstring": "Returns BoundArguments with default values filled in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py",
    "ast_data": "FunctionDef name:bind_with_defaults arg:self arg:args arg:kwargs arg:default_values arguments arg arg arg arg Assign Call Call Assign Call For Call If Compare Assign Assign For Assign If Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_replace_nan",
    "source_code": "def _replace_nan(a, val):\n    a = np.asanyarray(a)\n    if a.dtype == np.object_:\n        mask = np.not_equal(a, a, dtype=bool)\n    elif issubclass(a.dtype.type, np.inexact):\n        mask = np.isnan(a)\n    else:\n        mask = None\n    if mask is not None:\n        a = np.array(a, subok=True, copy=True)\n        np.copyto(a, val, where=mask)\n    return (a, mask)",
    "docstring": "If is of inexact type, make a copy of , replace NaNs with the value, and return the copy together with a boolean mask marking the locations where NaNs were present. If is not of inexact type, do nothing and return together with a mask of None. Note that scalars will end up as array scalars, which is important for using the result as the value of the out argument in some operations. Parameters ---------- a : array-like Input array. val : float NaN values are set to val before doing the operation. Returns ------- y : ndarray If is of inexact type, return a copy of with the NaNs replaced by the fill value, otherwise return . mask: {bool, None} If is of inexact type, return a boolean mask marking locations of NaNs, otherwise return None.",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_nanfunctions_impl.py",
    "ast_data": "FunctionDef name:_replace_nan arg:a arg:val arguments arg arg Assign Call If Compare Assign Call If Call Assign Call Assign If Compare Assign Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "random_samples_indices",
    "source_code": "def random_samples_indices(iters: int, rdims: Tensor, dv: torch.device) -> Tensor:\n    rands = torch.rand(size=(iters, 2, rdims.shape[0]), device=dv)\n    scaled_rands = rands * (rdims - 1e-08).float()\n    rand_samples_rel = scaled_rands.long()\n    return rand_samples_rel",
    "docstring": "Randomly sample indices of tensor.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\adalam\\utils.py",
    "ast_data": "FunctionDef name:random_samples_indices arg:iters arg:rdims arg:dv arguments arg arg arg Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "add_input",
    "source_code": "def add_input(self, *args, **kwargs):\n    self._benchmark.add_input(*args, **kwargs)",
    "docstring": "Store a single input to a module into the benchmark memory and keep it there. During the benchmark execution every thread is going to pick up a random input from the all the inputs ever supplied to the benchmark via this function.",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\throughput_benchmark.py",
    "ast_data": "FunctionDef name:add_input arg:self arguments arg arg arg Call"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "@staticmethod\n@abc.abstractmethod\ndef apply(data: Series | DataFrame | np.ndarray, func: AggFuncType, args: tuple, kwargs: dict[str, Any], decorator: Callable, axis: Axis):\n    pass",
    "docstring": "Executor method to run functions by an axis. While we can see `` is implemented accordingly.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:apply arg:data arg:func arg:args arg:kwargs arg:decorator arg:axis arguments arg arg arg arg arg arg"
  },
  {
    "library": "pytorch",
    "name": "raise_comms",
    "source_code": "def raise_comms(snodes: list[BaseSchedulerNode]) -> list[BaseSchedulerNode]:\n    return _schedule_for_comm(snodes, raise_comms=True, sink_waits=False, reorder_for_overlap=False)",
    "docstring": "Greedily schedules comms as early as possible.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\comms.py",
    "ast_data": "FunctionDef name:raise_comms arg:snodes arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "no_sync",
    "source_code": "@contextmanager\ndef no_sync(self):\n    old_require_backward_grad_sync = self.require_backward_grad_sync\n    self.require_backward_grad_sync = False\n    try:\n        yield\n    finally:\n        self.require_backward_grad_sync = old_require_backward_grad_sync",
    "docstring": "Context manager to disable gradient synchronizations across DDP processes. Within this context, gradients will be accumulated on module variables, which will later be synchronized in the first forward-backward pass exiting the context. Example:: >>> # xdoctest: +SKIP(\"undefined variables\") >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg) >>> with ddp.no_sync(): >>> for input in inputs: >>> ddp(input).backward() # no synchronization, accumulate grads >>> ddp(another_input).backward() # synchronize grads .. warning:: The forward pass should be included inside the context manager, or else gradients will still be synchronized.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\parallel\\distributed.py",
    "ast_data": "FunctionDef name:no_sync arg:self arguments arg Assign Assign Try Assign"
  },
  {
    "library": "tensorflow",
    "name": "AnalysisResult",
    "source_code": "class AnalysisResult:\n    pass",
    "docstring": "This class represents an analysis result and how it should be logged. This class must provide the following fields: * : The log level to which this detection should be logged * : The message that should be logged for this detection For an example, see .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py",
    "ast_data": "ClassDef name:AnalysisResult"
  },
  {
    "library": "scikit-learn",
    "name": "_predict",
    "source_code": "def _predict(self, X):\n    return np.asarray([est.predict(X) for est in self.estimators_]).T",
    "docstring": "Collect results from clf.predict calls.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py",
    "ast_data": "FunctionDef name:_predict arg:self arg:X arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "Transform",
    "source_code": "class Transform(abc.ABC):\n    module: torch.fx.GraphModule\n    'The module to be transformed.'\n    fake_mode: fake_tensor.FakeTensorMode | None\n    'The existing fake mode detected from `self.module`.'\n\n    def __init__(self, module: torch.fx.GraphModule):\n        self.module = module\n        self.fake_mode = self._detect_fake_mode()\n\n    def _detect_fake_mode(self) -> fake_tensor.FakeTensorMode | None:\n        fake_tensors = [node.meta.get('val') for node in self.module.graph.nodes]\n        with unset_fake_temporarily():\n            return torch._dynamo.utils.detect_fake_mode(fake_tensors)\n\n    def _maybe_fakefy_args(self, fake_mode: fake_tensor.FakeTensorMode | None, *args: Any) -> tuple[Any, ...]:\n        if fake_mode is None:\n            return args\n        return tuple((fake_mode.from_tensor(t) if isinstance(t, torch.Tensor) else t for t in args))\n\n    @abc.abstractmethod\n    def _run(self, *args, **kwargs) -> torch.fx.GraphModule:\n        ...\n\n    def run(self, *args, **kwargs) -> torch.fx.GraphModule:\n        return self._run(*args, **kwargs)",
    "docstring": "Base class for FX graph transformations to be used by FX-ONNX exporter. Similar to _, specializations of this class execute the FX graph Node-by-Node. Methods in the class can be overridden to customize the behavior of the model. This pattern can be useful for many things, including writing code transformations as well as analysis passes. The following methods can be overridden:: _run() +-- run_node() +-- placeholder() +-- get_attr() +-- call_function() +-- call_method() +-- call_module() +-- output() One important aspect to note is that if the transformation modifies the model input and/or output signature, (e.g. additional inputs/outputs are added to the model), :class: and/or :class: are needed to reconcile :attr:. That is, the model signature and the model representation must match. TODO(bowbao): Add more overridable methods in call hierarchy TODO(bowbao): Create an example once more overridable methods are added.",
    "type": "class",
    "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\_pass.py",
    "ast_data": "ClassDef name:Transform FunctionDef name:__init__ arg:self arg:module arguments arg arg Assign Assign Call FunctionDef name:_detect_fake_mode arg:self arguments arg Assign Call With Call Return return:yes Call FunctionDef name:_maybe_fakefy_args arg:self arg:fake_mode arguments arg arg arg If Compare Return return:yes Return return:yes Call Call Call FunctionDef name:_run arg:self arguments arg arg arg FunctionDef name:run arg:self arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_get_reshape_kernel",
    "source_code": "def _get_reshape_kernel(kd: int, ky: int, kx: int) -> Tensor:\n    numel: int = kd * ky * kx\n    weight = eye(numel)\n    return weight.view(numel, kd, ky, kx)",
    "docstring": "Return neigh2channels conv kernel.",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\siftdesc.py",
    "ast_data": "FunctionDef name:_get_reshape_kernel arg:kd arg:ky arg:kx arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_callback",
    "source_code": "def set_callback(self, property_name, callback):\n    if property_name not in self._config:\n        raise KeyError('%s is not a valid property name.' % property_name)\n    if not callable(callback):\n        raise TypeError('The callback object provided is not callable.')\n    self._set_callbacks[property_name] = callback",
    "docstring": "Set a set-callback for given property. Args: property_name: Name of the property. callback: The callback as a of signature: def cbk(config): where config is the config after it is set to the new value. The callback is invoked each time the set() method is called with the matching property_name. Raises: KeyError: If property_name does not exist. TypeError: If is not callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\cli_config.py",
    "ast_data": "FunctionDef name:set_callback arg:self arg:property_name arg:callback arguments arg arg arg If Compare Raise Call If Call Raise Call Assign"
  },
  {
    "library": "pytorch",
    "name": "insert_custom_op_guards",
    "source_code": "def insert_custom_op_guards(gm: torch.fx.GraphModule, ops_to_guard: set[str]) -> None:\n    for node in gm.graph.nodes:\n        if node.op == 'call_function' and str(node.target) in ops_to_guard:\n            with _set_node_metadata_hook(gm, functools.partial(_node_metadata_hook, stack_trace=node.meta.get('stack_trace'))), gm.graph.inserting_before(node):\n                for arg in (*node.args, *node.kwargs.values()):\n                    if isinstance(arg, torch.fx.Node) and isinstance(arg.meta.get('val'), torch.Tensor):\n                        val = arg.meta['val']\n                        gm.graph.call_function(torch.ops.aten._assert_tensor_metadata.default, args=(arg,), kwargs={'dtype': val.dtype, 'device': val.device, 'layout': val.layout})\n    gm.recompile()",
    "docstring": "This is used by draft_export to insert guards in front of calls to custom operators which have a generated fake kernel.",
    "type": "function",
    "file_path": "pytorch\\torch\\_export\\passes\\insert_custom_op_guards.py",
    "ast_data": "FunctionDef name:insert_custom_op_guards arg:gm arg:ops_to_guard arguments arg arg For If BoolOp Compare Compare Call With Call Call Call Call For Call If BoolOp Call Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "model_proto",
    "source_code": "@property\ndef model_proto(self) -> onnx.ModelProto:\n    return ir.serde.serialize_model(self.model)",
    "docstring": "Return the ONNX `` object.",
    "type": "method",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py",
    "ast_data": "FunctionDef name:model_proto arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_BaseFilter",
    "source_code": "class _BaseFilter(SelectorMixin, BaseEstimator):\n    _parameter_constraints: dict = {'score_func': [callable]}\n\n    def __init__(self, score_func):\n        self.score_func = score_func\n\n    @_fit_context(prefer_skip_nested_validation=True)\n    def fit(self, X, y=None):\n        if y is None:\n            X = validate_data(self, X, accept_sparse=['csr', 'csc'])\n        else:\n            X, y = validate_data(self, X, y, accept_sparse=['csr', 'csc'], multi_output=True)\n        self._check_params(X, y)\n        score_func_ret = self.score_func(X, y)\n        if isinstance(score_func_ret, (list, tuple)):\n            self.scores_, self.pvalues_ = score_func_ret\n            self.pvalues_ = np.asarray(self.pvalues_)\n        else:\n            self.scores_ = score_func_ret\n            self.pvalues_ = None\n        self.scores_ = np.asarray(self.scores_)\n        return self\n\n    def _check_params(self, X, y):\n        pass\n\n    def __sklearn_tags__(self):\n        tags = super().__sklearn_tags__()\n        tags.target_tags.required = True\n        tags.input_tags.sparse = True\n        return tags",
    "docstring": "Initialize the univariate feature selection. Parameters ---------- score_func : callable Function taking two arrays X and y, and returning a pair of arrays (scores, pvalues) or a single array with scores.",
    "type": "class",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py",
    "ast_data": "ClassDef name:_BaseFilter FunctionDef name:__init__ arg:self arg:score_func arguments arg arg Assign FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg If Compare Assign Call Assign Call Call Assign Call If Call Assign Assign Call Assign Assign Assign Call Return return:yes Call FunctionDef name:_check_params arg:self arg:X arg:y arguments arg arg arg FunctionDef name:__sklearn_tags__ arg:self arguments arg Assign Call Call Assign Assign Return return:yes"
  },
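_BaseFilter is the base of the public univariate selectors, so the fit logic above is easiest to exercise through a subclass such as SelectKBest:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
# f_classif returns (scores, pvalues), so both scores_ and pvalues_ get set.
selector = SelectKBest(score_func=f_classif, k=2).fit(X, y)
X_new = selector.transform(X)  # keeps the 2 best-scoring features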
  {
    "library": "tensorflow",
    "name": "_all_reduce",
    "source_code": "def _all_reduce(self, reduce_op, value, replica_id, options):\n    raise NotImplementedError('_all_reduce must be implemented in descendants.')",
    "docstring": "All-reduce the across all replicas so that all get the result. can be a nested structure of tensors or . The implementation should generally batch the all-reduces when possible. can be set to hint the batching behavior. This API must be called in a replica context. Args: reduce_op: A value specifying how values should be combined. value: Value to be reduced. A tensor or a nested structure of tensors or . replica_id: An integer indicating the id of the replica where this all_reduce is called under. This is the local replica id that ranges from 0 to len(local_devices) - 1. options: A . Returns: A tensor/IndexedSlices or a nested structure of tensors/IndexedSlices with the reduced values. The structure is the same as .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_all_reduce arg:self arg:reduce_op arg:value arg:replica_id arg:options arguments arg arg arg arg arg Raise Call"
  },
  {
    "library": "scipy",
    "name": "default_xp",
    "source_code": "@contextmanager\ndef default_xp(xp: ModuleType) -> Generator[None, None, None]:\n    token = _default_xp_ctxvar.set(xp)\n    try:\n        yield\n    finally:\n        _default_xp_ctxvar.reset(token)",
    "docstring": "In all `xp` is the namespace for the desired array (the second parameter of the tests).",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:default_xp arg:xp arguments arg Assign Call Try Call"
  },
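The set/reset-token idiom default_xp relies on can be shown standalone; the names _default_ns and default_ns below are hypothetical stand-ins, not scipy API.

from contextlib import contextmanager
from contextvars import ContextVar
from types import ModuleType

_default_ns: ContextVar[ModuleType] = ContextVar("default_ns")

@contextmanager
def default_ns(ns: ModuleType):
    token = _default_ns.set(ns)  # install the namespace for this context
    try:
        yield
    finally:
        _default_ns.reset(token)  # restore whatever was set before

import numpy as np
with default_ns(np):
    assert _default_ns.get() is np  # visible to code inside the context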
  {
    "library": "pytorch",
    "name": "encode_varint",
    "source_code": "def encode_varint(n: int) -> list[int]:\n    assert n >= 0\n    b = [n & 63]\n    n >>= 6\n    while n > 0:\n        b[-1] |= 64\n        b.append(n & 63)\n        n >>= 6\n    return b",
    "docstring": "6-bit chunk encoding of an unsigned integer See",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py",
    "ast_data": "FunctionDef name:encode_varint arg:n arguments arg Compare Assign While Compare Call Return return:yes"
  },
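The matching decoder is not part of this entry; a sketch of one (decode_varint is a hypothetical name) that undoes the 6-bit-payload, continuation-bit-64 layout used above:

def decode_varint(b: list[int]) -> int:
    # Each chunk carries 6 payload bits; bit 64 only marks "more chunks follow".
    n = 0
    for shift, chunk in enumerate(b):
        n |= (chunk & 63) << (6 * shift)
    return n

assert decode_varint([72, 3]) == 200  # encode_varint(200) == [72, 3]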
  {
    "library": "tensorflow",
    "name": "experimental_type_proto",
    "source_code": "@classmethod\ndef experimental_type_proto(cls) -> Type[types_pb2.SerializedDType]:\n    return types_pb2.SerializedDType",
    "docstring": "Returns the type of proto associated with DType serialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py",
    "ast_data": "FunctionDef name:experimental_type_proto arg:cls arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "random",
    "source_code": "def random(self, n: IntNumber=1, *, workers: IntNumber=1) -> np.ndarray:\n    sample = self._random(n, workers=workers)\n    if self.optimization_method is not None:\n        sample = self.optimization_method(sample)\n    self.num_generated += n\n    return sample",
    "docstring": "Draw in the half-open interval `Haltonn10^3`. Returns ------- sample : array_like (n, d) QMC sample.",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_qmc.py",
    "ast_data": "FunctionDef name:random arg:self arg:n arguments arg arg arg Assign Call If Compare Assign Call Return return:yes"
  },
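Typical usage goes through a concrete engine such as scipy.stats.qmc.Halton, the one engine for which the workers argument is supported:

from scipy.stats import qmc

sampler = qmc.Halton(d=2, seed=7)
sample = sampler.random(8)  # shape (8, 2), values in [0, 1)
more = sampler.random(1000, workers=-1)  # parallel draw on all CPU threads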
  {
    "library": "scipy",
    "name": "to_discrete",
    "source_code": "def to_discrete(self, dt, method='zoh', alpha=None):\n    return TransferFunction(*cont2discrete((self.num, self.den), dt, method=method, alpha=alpha)[:-1], dt=dt)",
    "docstring": "Returns the discretized system. Parameters: See for details. Returns ------- sys: instance of and",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_discrete arg:self arg:dt arg:method arg:alpha arguments arg arg arg arg Return return:yes Call Call"
  },
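A quick sketch discretizing the first-order low-pass H(s) = 1/(s + 1) with zero-order hold:

from scipy import signal

sys_c = signal.TransferFunction([1.0], [1.0, 1.0])  # H(s) = 1 / (s + 1)
sys_d = sys_c.to_discrete(dt=0.1, method='zoh')     # discrete-time TransferFunction
print(sys_d.num, sys_d.den, sys_d.dt)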
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "def decision_function(self, X):\n    decision = self._raw_predict(X)\n    if decision.shape[1] == 1:\n        decision = decision.ravel()\n    return decision",
    "docstring": "Compute the decision function of ``. Parameters ---------- X : array-like, shape (n_samples, n_features) The input samples. Returns ------- decision : ndarray, shape (n_samples,) or (n_samples, n_trees_per_iteration) The raw predicted values (i.e. the sum of the trees leaves) for each sample. n_trees_per_iteration is equal to the number of classes in multiclass classification.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg Assign Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_decayed_lr",
    "source_code": "def _decayed_lr(self, var_dtype):\n    lr_t = self._get_hyper('learning_rate', var_dtype)\n    if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):\n        local_step = math_ops.cast(self.iterations, var_dtype)\n        lr_t = math_ops.cast(lr_t(local_step), var_dtype)\n    if self._initial_decay > 0.0:\n        local_step = math_ops.cast(self.iterations, var_dtype)\n        decay_t = math_ops.cast(self._initial_decay, var_dtype)\n        lr_t = lr_t / (1.0 + decay_t * local_step)\n    return lr_t",
    "docstring": "Get decayed learning rate as a Tensor with dtype=var_dtype.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py",
    "ast_data": "FunctionDef name:_decayed_lr arg:self arg:var_dtype arguments arg arg Assign Call If Call Assign Call Assign Call Call If Compare Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_run_graph_benchmark",
    "source_code": "def _run_graph_benchmark(self, iterable, iters, warmup, session_config, initializer=None):\n    deltas = []\n    if context.executing_eagerly():\n        raise RuntimeError('Graph mode benchmarking is not supported in eager mode.')\n    for _ in range(iters):\n        with session.Session(config=session_config) as sess:\n            if warmup:\n                if initializer:\n                    sess.run(initializer)\n                sess.run(iterable)\n            if initializer:\n                sess.run(initializer)\n            start = time.time()\n            sess.run(iterable)\n            end = time.time()\n        deltas.append(end - start)\n    return np.median(deltas)",
    "docstring": "Benchmarks the iterable in graph mode. Runs the iterable times. In each iteration, the benchmark measures the time it takes to go execute the iterable. Args: iterable: The tf op or tf.data Dataset to benchmark. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. session_config: A ConfigProto protocol buffer with configuration options for the session. Applicable only for benchmarking in graph mode. initializer: The initializer op required to initialize the iterable. Returns: A float, representing the median time (with respect to ) it takes for the iterable to be executed num of times. Raises: RuntimeError: When executed in eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\benchmarks\\benchmark_base.py",
    "ast_data": "FunctionDef name:_run_graph_benchmark arg:self arg:iterable arg:iters arg:warmup arg:session_config arg:initializer arguments arg arg arg arg arg arg Assign If Call Raise Call For Call With Call If If Call Call If Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_tensor_index_in_iterable",
    "source_code": "def _get_tensor_index_in_iterable(iterable, t):\n    for i, elem in enumerate(iterable):\n        if t is elem:\n            return i\n    raise ValueError(f'Element `{t!r}` is not found in iterable `{iterable!r}`.')",
    "docstring": "Returns index of first occurrence of , raises ValueError if not found.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2_indexed_slices_rewriter.py",
    "ast_data": "FunctionDef name:_get_tensor_index_in_iterable arg:iterable arg:t arguments arg arg For Call If Compare Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "uid",
    "source_code": "def uid() -> int:\n    return pywrap_tfe.TFE_Py_UID()",
    "docstring": "A unique (within this program execution) integer.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:uid arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "as_saver_def",
    "source_code": "def as_saver_def(self):\n    return self.saver_def",
    "docstring": "Generates a representation of this saver. Returns: A proto.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py",
    "ast_data": "FunctionDef name:as_saver_def arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_safe_set_output",
    "source_code": "def _safe_set_output(estimator, *, transform=None):\n    set_output_for_transform = hasattr(estimator, 'transform') or (hasattr(estimator, 'fit_transform') and transform is not None)\n    if not set_output_for_transform:\n        return\n    if not hasattr(estimator, 'set_output'):\n        raise ValueError(f'Unable to configure output for {estimator} because `set_output` is not available.')\n    return estimator.set_output(transform=transform)",
    "docstring": "Safely call estimator.set_output and error if it not available. This is used by meta-estimators to set the output for child estimators. Parameters ---------- estimator : estimator instance Estimator instance. transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of the following estimator's methods: - - If , this operation is a no-op. Returns ------- estimator : estimator instance Estimator instance.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\_set_output.py",
    "ast_data": "FunctionDef name:_safe_set_output arg:estimator arguments arg arg Assign BoolOp Call BoolOp Call Compare If Return return:no If Call Raise Call Return return:yes Call"
  },
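_safe_set_output is a thin guard around the public set_output API; on an estimator that supports it, the direct call looks like this (requires a scikit-learn version with set_output):

import pandas as pd
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
scaler = StandardScaler().set_output(transform="pandas")
out = scaler.fit_transform(X)  # a DataFrame, not an ndarray
assert hasattr(out, "columns")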
  {
    "library": "pytorch",
    "name": "validate_non_overlapping_shards_metadata",
    "source_code": "def validate_non_overlapping_shards_metadata(shards: list[ShardMetadata]):\n    if not shards or len(shards) == 1:\n        return\n    sharded_dims: list[int] = []\n    for dim in range(len(shards[0].shard_offsets)):\n        for i in range(1, len(shards)):\n            if shards[i].shard_offsets[dim] != shards[0].shard_offsets[dim] or shards[i].shard_sizes[dim] != shards[0].shard_sizes[dim]:\n                sharded_dims.append(dim)\n                break\n    pair: Optional[tuple[int, int]] = None\n    if len(sharded_dims) == 0:\n        all_zeros: bool = all((shard.shard_offsets == [0] * len(shards[0].shard_offsets) and math.prod(shard.shard_sizes) == 0 for shard in shards))\n        if all_zeros:\n            return\n        pair = (0, 1)\n    elif len(sharded_dims) == 1:\n        pair = _find_1d_overlapping_shards(shards, sharded_dims[0])\n    else:\n        pair = _find_nd_overlapping_shards(shards, sharded_dims)\n    if pair:\n        raise ValueError(f'Shards {shards[pair[0]]} and {shards[pair[1]]} overlap')",
    "docstring": "Ensures none of the shards overlap with each other. Args: shards(List[ShardMetadata]): List of :class: objects representing each shard. Raises: `` if there's overlap in any two shards.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py",
    "ast_data": "FunctionDef name:validate_non_overlapping_shards_metadata arg:shards arguments arg If BoolOp Compare Call Return return:no For Call Call For Call Call If BoolOp Compare Compare Call If Compare Call Call BoolOp Compare Call Compare Call If Return return:no Assign If Compare Call Assign Call Assign Call If Raise Call"
  },
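A minimal sketch, assuming ShardMetadata is importable from torch.distributed._shard.metadata (the class this validator consumes):

from torch.distributed._shard.metadata import ShardMetadata
from torch.distributed._shard.sharding_spec._internals import (
    validate_non_overlapping_shards_metadata,
)

shards = [
    ShardMetadata(shard_offsets=[0], shard_sizes=[4], placement="rank:0/cpu"),
    ShardMetadata(shard_offsets=[4], shard_sizes=[4], placement="rank:1/cpu"),
]
validate_non_overlapping_shards_metadata(shards)  # passes: shards only touch
# A shard at offsets [2] with sizes [4] would overlap the first shard and
# raise ValueError naming the two offending shards.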
  {
    "library": "tensorflow",
    "name": "_ListCodec",
    "source_code": "class _ListCodec:\n\n    def can_encode(self, pyobj):\n        return isinstance(pyobj, list)\n\n    def do_encode(self, list_value, encode_fn):\n        encoded_list = struct_pb2.StructuredValue()\n        encoded_list.list_value.CopyFrom(struct_pb2.ListValue())\n        for element in list_value:\n            encoded_list.list_value.values.add().CopyFrom(encode_fn(element))\n        return encoded_list\n\n    def can_decode(self, value):\n        return value.HasField('list_value')\n\n    def do_decode(self, value, decode_fn):\n        return [decode_fn(element) for element in value.list_value.values]",
    "docstring": "Codec for lists.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\nested_structure_coder.py",
    "ast_data": "ClassDef name:_ListCodec FunctionDef name:can_encode arg:self arg:pyobj arguments arg arg Return return:yes Call FunctionDef name:do_encode arg:self arg:list_value arg:encode_fn arguments arg arg arg Assign Call Call Call For Call Call Call Return return:yes FunctionDef name:can_decode arg:self arg:value arguments arg arg Return return:yes Call FunctionDef name:do_decode arg:self arg:value arg:decode_fn arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "most_specific_compatible_shape",
    "source_code": "def most_specific_compatible_shape(self, other) -> 'TensorShape':\n    other = as_shape(other)\n    if self.dims is None or other.dims is None or self.rank != other.rank:\n        return unknown_shape()\n    dims = [d1 if d1 is not None and d2 is not None and (d1 == d2) else None for d1, d2 in zip(self.dims, other.dims)]\n    return TensorShape(dims)",
    "docstring": "Returns the most specific TensorShape compatible with and . * TensorShape([None, 1]) is the most specific TensorShape compatible with both TensorShape([2, 1]) and TensorShape([5, 1]). Note that TensorShape(None) is also compatible with above mentioned TensorShapes. * TensorShape([1, 2, 3]) is the most specific TensorShape compatible with both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more less specific TensorShapes compatible with above mentioned TensorShapes, e.g. TensorShape([1, 2, None]), TensorShape(None). Args: other: Another . Returns: A which is the most specific compatible shape of and .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:most_specific_compatible_shape arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Compare Return return:yes Call Assign BoolOp Compare Compare Compare Call Return return:yes Call"
  },
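The first bullet of the docstring as runnable code:

import tensorflow as tf

s1 = tf.TensorShape([2, 1])
s2 = tf.TensorShape([5, 1])
merged = s1.most_specific_compatible_shape(s2)
print(merged)  # (None, 1): dimensions that disagree become unknown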
  {
    "library": "seaborn",
    "name": "unique_markers",
    "source_code": "def unique_markers(n):\n    markers = ['o', 'X', (4, 0, 45), 'P', (4, 0, 0), (4, 1, 0), '^', (4, 1, 45), 'v']\n    s = 5\n    while len(markers) < n:\n        a = 360 / (s + 1) / 2\n        markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n        s += 1\n    return markers[:n]",
    "docstring": "Build an arbitrarily long list of unique marker styles for points. Parameters ---------- n : int Number of unique marker specs to generate. Returns ------- markers : list of string or tuples Values for defining :class: objects. All markers will be filled.",
    "type": "function",
    "file_path": "seaborn\\seaborn\\_base.py",
    "ast_data": "FunctionDef name:unique_markers arg:n arguments arg Assign Assign While Compare Call Assign Call Return return:yes"
  },
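The tuples produced above are valid matplotlib (numsides, style, angle) marker specs; a sketch that previews the first n markers (unique_markers is a private seaborn helper, imported here as defined above):

import matplotlib.pyplot as plt
from seaborn._base import unique_markers

fig, ax = plt.subplots()
for i, m in enumerate(unique_markers(12)):
    ax.plot(i, 0, marker=m, markersize=12, linestyle="")
plt.show()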
  {
    "library": "tensorflow",
    "name": "get_feature_key_name",
    "source_code": "def get_feature_key_name(self):\n    if self.is_categorical_column_weighted():\n        return self.categorical_column.categorical_column.name\n    return self.categorical_column.name",
    "docstring": "get_feature_key_name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column.py",
    "ast_data": "FunctionDef name:get_feature_key_name arg:self arguments arg If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_IndexedSlicesToTensorNoWarning",
    "source_code": "def _IndexedSlicesToTensorNoWarning(indexed_slices):\n    if not isinstance(indexed_slices, indexed_slices_lib.IndexedSlices):\n        return indexed_slices\n    if indexed_slices.dense_shape is None:\n        raise ValueError('Tensor conversion requested for IndexedSlices without dense_shape: %s' % str(indexed_slices))\n    return math_ops.unsorted_segment_sum(indexed_slices.values, indexed_slices.indices, indexed_slices.dense_shape[0])",
    "docstring": "Converts an IndexedSlices to a Tensor without sparse->dense warnings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_grad.py",
    "ast_data": "FunctionDef name:_IndexedSlicesToTensorNoWarning arg:indexed_slices arguments arg If Call Return return:yes If Compare Raise Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_BesselJ1Grad",
    "source_code": "@ops.RegisterGradient('BesselJ1')\ndef _BesselJ1Grad(op: ops.Operation, grad):\n    x = op.inputs[0]\n    y = op.outputs[0]\n    with ops.control_dependencies([grad]):\n        dy_dx = array_ops.where_v2(math_ops.equal(x, 0.0), math_ops.cast(0.5, x.dtype), special_math_ops.bessel_j0(x) - math_ops.div(y, x))\n        return grad * dy_dx",
    "docstring": "Compute gradient of bessel_j1(x) with respect to its argument.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\math_grad.py",
    "ast_data": "FunctionDef name:_BesselJ1Grad arg:op arg:grad arguments arg arg Assign Assign With Call Assign Call Call Call Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "from_dict",
    "source_code": "@classmethod\ndef from_dict(cls, convert_custom_config_dict: dict[str, Any]) -> ConvertCustomConfig:\n    conf = cls()\n    for quant_type_name, custom_module_mapping in convert_custom_config_dict.get(OBSERVED_TO_QUANTIZED_DICT_KEY, {}).items():\n        quant_type = _quant_type_from_str(quant_type_name)\n        for observed_class, quantized_class in custom_module_mapping.items():\n            conf.set_observed_to_quantized_mapping(observed_class, quantized_class, quant_type)\n    conf.set_preserved_attributes(convert_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, []))\n    return conf",
    "docstring": "Create a `` This function is primarily for backward compatibility and may be removed in the future.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py",
    "ast_data": "FunctionDef name:from_dict arg:cls arg:convert_custom_config_dict arguments arg arg Assign Call For Call Call Assign Call For Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rotation",
    "source_code": "@property\ndef rotation(self) -> So3:\n    return self._rotation",
    "docstring": "Return the underlying .",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py",
    "ast_data": "FunctionDef name:rotation arg:self arguments arg Return return:yes"
  },
  {
    "library": "scipy",
    "name": "get_workers",
    "source_code": "def get_workers():\n    return getattr(_config, 'default_workers', 1)",
    "docstring": "Returns the default number of workers within the current context Examples -------- >>> from scipy import fft >>> fft.get_workers() 1 >>> with fft.set_workers(4): ... fft.get_workers() 4",
    "type": "function",
    "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py",
    "ast_data": "FunctionDef name:get_workers arguments Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "enable_cudnn_sdp",
    "source_code": "def enable_cudnn_sdp(enabled: bool):\n    torch._C._set_sdp_use_cudnn(enabled)",
    "docstring": ".. warning:: This flag is beta and subject to change. Enables or disables cuDNN scaled dot product attention.",
    "type": "function",
    "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py",
    "ast_data": "FunctionDef name:enable_cudnn_sdp arg:enabled arguments arg Call"
  },
  {
    "library": "pytorch",
    "name": "view_inference_rule",
    "source_code": "@register_inference_rule('reshape')\n@register_inference_rule('view')\ndef view_inference_rule(n: Node, symbols, constraints, counter):\n    assert isinstance(n.args[0], Node)\n    my_view, counter = gen_tvar(counter)\n    symbols[n] = my_view\n    src_var = symbols[n.args[0]]\n    t2 = [symbols[elem] if isinstance(elem, Node) else elem for elem in n.args[1:]]\n    t2_type = []\n    num_constraints = []\n    for t in t2:\n        if t == -1:\n            var, counter = gen_dvar(counter)\n            t2_type.append(var)\n            num_constraints.append(BinConstraintD(var, Dyn, op_neq))\n        else:\n            num_constraints.append(BinConstraintD(t, Dyn, op_neq))\n            t2_type.append(t)\n    t2_type = TensorType(t2_type)\n    c1 = BinConstraintT(my_view, t2_type, op_eq)\n    c2 = CanReshape(src_var, t2_type)\n    return ([c1, c2] + num_constraints, counter)",
    "docstring": "Similar to reshape but with an extra condition on the strides",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py",
    "ast_data": "FunctionDef name:view_inference_rule arg:n arg:symbols arg:constraints arg:counter arguments arg arg arg arg Call Assign Call Assign Assign Assign Call Assign Assign For If Compare Assign Call Call Call Call Call Call Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "pygame",
    "name": "__init__",
    "source_code": "def __init__(self, f, callback=None, errback=None):\n    self.f = f\n    self.exception = None\n    self.result = None\n    self.callback = callback\n    self.errback = errback",
    "docstring": "f - is the function we that we call callback(result) - this is called when the function(f) returns errback(exception) - this is called when the function(f) raises an exception.",
    "type": "method",
    "file_path": "pygame\\src_py\\threads\\__init__.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:f arg:callback arg:errback arguments arg arg arg arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "wait",
    "source_code": "def wait(self, timeout: float=-1, period: float=1) -> Optional[RunProcsResult]:\n    if timeout == 0:\n        return self._poll()\n    if timeout < 0:\n        timeout = sys.maxsize\n    expiry = time.time() + timeout\n    while time.time() < expiry:\n        pr = self._poll()\n        if pr:\n            return pr\n        time.sleep(period)\n    return None",
    "docstring": "Wait for the specified `` when the signals received. It is up to the consumer of the code to properly handle the exception. It is important not to swallow the exception otherwise the process would not terminate. Example of the typical workflow can be: .. code-block:: python pc = start_processes(...) try: pc.wait(1) .. do some other work except SignalException as e: pc.shutdown(e.sigval, timeout=30) If SIGTERM or SIGINT occurs, the code above will try to shutdown child processes by propagating received signal. If child processes will not terminate in the timeout time, the process will send the SIGKILL.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py",
    "ast_data": "FunctionDef name:wait arg:self arg:timeout arg:period arguments arg arg arg If Compare Return return:yes Call If Compare Assign Assign Call While Compare Call Assign Call If Return return:yes Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_calculate_scores",
    "source_code": "def _calculate_scores(self, query, key):\n    scores = math_ops.matmul(query, key, transpose_b=True)\n    if self.scale is not None:\n        scores *= self.scale\n    return scores",
    "docstring": "Calculates attention scores as a query-key dot product. Args: query: Query tensor of shape . key: Key tensor of shape . Returns: Tensor of shape .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\dense_attention.py",
    "ast_data": "FunctionDef name:_calculate_scores arg:self arg:query arg:key arguments arg arg arg Assign Call If Compare Return return:yes"
  },
  {
    "library": "kornia",
    "name": "_normalize_input",
    "source_code": "@staticmethod\ndef _normalize_input(x: torch.Tensor, eps: float=1e-07) -> torch.Tensor:\n    if not is_mps_tensor_safe(x):\n        sp, mp = torch.std_mean(x, dim=(-3, -2, -1), keepdim=True)\n    else:\n        mp = torch.mean(x, dim=(-3, -2, -1), keepdim=True)\n        sp = torch.std(x, dim=(-3, -2, -1), keepdim=True)\n    return (x - mp.detach()) / (sp.detach() + eps)",
    "docstring": "Normalize the input by batch.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\hardnet.py",
    "ast_data": "FunctionDef name:_normalize_input arg:x arg:eps arguments arg arg If Call Assign Call Assign Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "minimum_filter1d",
    "source_code": "@_ni_docstrings.docfiller\ndef minimum_filter1d(input, size, axis=-1, output=None, mode='reflect', cval=0.0, origin=0):\n    input = np.asarray(input)\n    if np.iscomplexobj(input):\n        raise TypeError('Complex type not supported')\n    axis = normalize_axis_index(axis, input.ndim)\n    if size < 1:\n        raise RuntimeError('incorrect filter size')\n    output = _ni_support._get_output(output, input)\n    if size // 2 + origin < 0 or size // 2 + origin >= size:\n        raise ValueError('invalid origin')\n    mode = _ni_support._extend_mode_to_code(mode)\n    _nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval, origin, 1)\n    return output",
    "docstring": "Calculate a 1-D minimum filter along the given axis. The lines of the array along the given axis are filtered with a minimum filter of given size. Parameters ---------- %(input)s size : int length along which to calculate 1D minimum %(axis)s %(output)s %(mode_reflect)s %(cval)s %(origin)s Returns ------- result : ndarray. Filtered image. Has the same shape as . Notes ----- This function implements the MINLIST algorithm [1]_, as described by Richard Harter [2]_, and has a guaranteed O(n) performance, being the length, regardless of filter size. References ---------- .. [1] .. [2] Examples -------- >>> from scipy.ndimage import minimum_filter1d >>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3) array([2, 0, 0, 0, 1, 1, 0, 0])",
    "type": "function",
    "file_path": "scipy\\scipy\\ndimage\\_filters.py",
    "ast_data": "FunctionDef name:minimum_filter1d arg:input arg:size arg:axis arg:output arg:mode arg:cval arg:origin arguments arg arg arg arg arg arg arg Assign Call If Call Raise Call Assign Call If Compare Raise Call Assign Call If BoolOp Compare Compare Raise Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ConvReLU1d",
    "source_code": "class ConvReLU1d(_FusedModule):\n\n    def __init__(self, conv, relu):\n        assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(relu) == ReLU, f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(relu)}'\n        super().__init__(conv, relu)",
    "docstring": "This is a sequential container which calls the Conv1d and ReLU modules. During quantization this will be replaced with the corresponding fused module.",
    "type": "class",
    "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py",
    "ast_data": "ClassDef name:ConvReLU1d FunctionDef name:__init__ arg:self arg:conv arg:relu arguments arg arg arg BoolOp Compare Call Compare Call Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "can_pan",
    "source_code": "def can_pan(self):\n    return True",
    "docstring": "Return whether this Axes supports any pan/zoom button functionality.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:can_pan arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_create_variables",
    "source_code": "def _create_variables(self, num_clusters):\n    init_value = array_ops.placeholder_with_default([], shape=None)\n    cluster_centers = variable_v1.VariableV1(init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)\n    cluster_centers_initialized = variable_v1.VariableV1(False, dtype=dtypes.bool, name='initialized')\n    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:\n        cluster_centers_updated = variable_v1.VariableV1(init_value, name='clusters_updated', validate_shape=False)\n        update_in_steps = variable_v1.VariableV1(self._mini_batch_steps_per_iteration, dtype=dtypes.int64, name='update_in_steps')\n        cluster_counts = variable_v1.VariableV1(array_ops.zeros([num_clusters], dtype=dtypes.int64))\n    else:\n        cluster_centers_updated = cluster_centers\n        update_in_steps = None\n        cluster_counts = variable_v1.VariableV1(array_ops.ones([num_clusters], dtype=dtypes.int64)) if self._use_mini_batch else None\n    return (cluster_centers, cluster_centers_initialized, cluster_counts, cluster_centers_updated, update_in_steps)",
    "docstring": "Creates variables. Args: num_clusters: an integer Tensor providing the number of clusters. Returns: Tuple with following elements: - cluster_centers: a Tensor for storing cluster centers - cluster_centers_initialized: bool Variable indicating whether clusters are initialized. - cluster_counts: a Tensor for storing counts of points assigned to this cluster. This is used by mini-batch training. - cluster_centers_updated: Tensor representing copy of cluster centers that are updated every step. - update_in_steps: numbers of steps left before we sync cluster_centers_updated back to cluster_centers.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\clustering_ops.py",
    "ast_data": "FunctionDef name:_create_variables arg:self arg:num_clusters arguments arg arg Assign Call Assign Call Assign Call If BoolOp Compare Assign Call Assign Call Assign Call Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "should_extension_dispatch",
    "source_code": "def should_extension_dispatch(left: ArrayLike, right: Any) -> bool:\n    return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray)",
    "docstring": "Identify cases where Series operation should dispatch to ExtensionArray method. Parameters ---------- left : np.ndarray or ExtensionArray right : object Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\dispatch.py",
    "ast_data": "FunctionDef name:should_extension_dispatch arg:left arg:right arguments arg arg Return return:yes BoolOp Call Call"
  },
  {
    "library": "kornia",
    "name": "forward",
    "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor:\n    KORNIA_CHECK_LAF(laf)\n    KORNIA_CHECK_SHAPE(img, ['B', '1', 'H', 'W'])\n    B, N = laf.shape[:2]\n    PS: int = self.patch_size\n    patches: torch.Tensor = extract_patches_from_pyramid(img, make_upright(laf), PS, True).view(-1, 1, PS, PS)\n    ellipse_shape: torch.Tensor = self.affine_shape_detector(patches)\n    ellipses = torch.cat([laf.view(-1, 2, 3)[..., 2].unsqueeze(1), ellipse_shape], dim=2).view(B, N, 5)\n    scale_orig = get_laf_scale(laf)\n    if self.preserve_orientation:\n        ori_orig = get_laf_orientation(laf)\n    laf_out = ellipse_to_laf(ellipses)\n    ellipse_scale = get_laf_scale(laf_out)\n    laf_out = scale_laf(laf_out, scale_orig / ellipse_scale)\n    if self.preserve_orientation:\n        laf_out = set_laf_orientation(laf_out, ori_orig)\n    return laf_out",
    "docstring": "Run forward. Args: laf: :math: img: :math: Returns: LAF_out: :math:",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\affine_shape.py",
    "ast_data": "FunctionDef name:forward arg:self arg:laf arg:img arguments arg arg arg Call Call Assign Call Call Call Call Assign Call Call Call Call Assign Call If Assign Call Assign Call Assign Call Assign Call If Assign Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "make_name_target",
    "source_code": "def make_name_target(self, *, name: str, production_group: str, location: str) -> addnodes.literal_strong:\n    name_node = addnodes.literal_strong(name, name)\n    prefix = f'grammar-token-{production_group}'\n    node_id = make_id(self.env, self.state.document, prefix, name)\n    name_node['ids'].append(node_id)\n    self.state.document.note_implicit_target(name_node, name_node)\n    obj_name = f'{production_group}:{name}' if production_group else name\n    std = self.env.domains.standard_domain\n    std.note_object('token', obj_name, node_id, location=location)\n    return name_node",
    "docstring": "Make a link target for the given production.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py",
    "ast_data": "FunctionDef name:make_name_target arg:self arguments arg arg arg arg Assign Call Assign Assign Call Call Call Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "references_index",
    "source_code": "def references_index(self, table, index):\n    return False",
    "docstring": "Return whether or not this instance references the specified index.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\ddl_references.py",
    "ast_data": "FunctionDef name:references_index arg:self arg:table arg:index arguments arg arg arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "MaxSizePartitioner",
    "source_code": "@tf_export('distribute.experimental.partitioners.MaxSizePartitioner', v1=[])\nclass MaxSizePartitioner(Partitioner):\n\n    def __init__(self, max_shard_bytes, max_shards=None, bytes_per_string=16):\n        if max_shard_bytes < 1:\n            raise ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}')\n        if max_shards and max_shards < 1:\n            raise ValueError(f'Argument `max_shards` must be positive. Received {max_shards}')\n        if bytes_per_string < 1:\n            raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}')\n        self._max_shard_bytes = max_shard_bytes\n        self._max_shards = max_shards\n        self._bytes_per_string = bytes_per_string\n\n    def __call__(self, shape, dtype, axis=0):\n        return partitioned_variables.variable_axis_size_partitioner(max_shard_bytes=self._max_shard_bytes, max_shards=self._max_shards, bytes_per_string_element=self._bytes_per_string, axis=axis)(shape, dtype)",
    "docstring": "Partitioner that keeps shards below . This partitioner ensures each shard has at most , and tries to allocate as few shards as possible, i.e., keeping shard size as large as possible. If the partitioner hits the limit, then each shard may end up larger than . By default equals and no limit on the number of shards is enforced. Examples: >>> partitioner = MaxSizePartitioner(max_shard_bytes=4) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [6, 1] >>> partitioner = MaxSizePartitioner(max_shard_bytes=4, max_shards=2) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [2, 1] >>> partitioner = MaxSizePartitioner(max_shard_bytes=1024) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [1, 1] >>> >>> # use in ParameterServerStrategy >>> # strategy = tf.distribute.experimental.ParameterServerStrategy( >>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py",
    "ast_data": "ClassDef name:MaxSizePartitioner FunctionDef name:__init__ arg:self arg:max_shard_bytes arg:max_shards arg:bytes_per_string arguments arg arg arg arg If Compare Raise Call If BoolOp Compare Raise Call If Compare Raise Call Assign Assign Assign FunctionDef name:__call__ arg:self arg:shape arg:dtype arg:axis arguments arg arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_binary_traverse",
    "source_code": "def _binary_traverse(self, nodes: NodeList) -> NodeSet:\n    return self._binary_search_impl(nodes, 0, len(nodes))",
    "docstring": "Binary search on for culprit.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\passes\\net_min_base.py",
    "ast_data": "FunctionDef name:_binary_traverse arg:self arg:nodes arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, x=0, y=0, text='', *, color=None, verticalalignment='baseline', horizontalalignment='left', multialignment=None, fontproperties=None, rotation=None, linespacing=None, rotation_mode=None, usetex=None, wrap=False, transform_rotates_text=False, parse_math=None, antialiased=None, **kwargs):\n    super().__init__()\n    self._x, self._y = (x, y)\n    self._text = ''\n    self._reset_visual_defaults(text=text, color=color, fontproperties=fontproperties, usetex=usetex, parse_math=parse_math, wrap=wrap, verticalalignment=verticalalignment, horizontalalignment=horizontalalignment, multialignment=multialignment, rotation=rotation, transform_rotates_text=transform_rotates_text, linespacing=linespacing, rotation_mode=rotation_mode, antialiased=antialiased)\n    self.update(kwargs)",
    "docstring": "Create a instance at *x*, *y* with string *text*. The text is aligned relative to the anchor point (*x*, *y*) according to `/gallery/text_labels_and_annotations/text_alignment`. While Text accepts the 'label' keyword argument, by default it is not added to the handles of a legend. Valid keyword arguments are: %(Text:kwdoc)s",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:x arg:y arg:text arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg arg Call Call Assign Assign Call Call"
  },
  {
    "library": "numpy",
    "name": "ndenumerate",
    "source_code": "@set_module('numpy')\nclass ndenumerate:\n\n    def __init__(self, arr):\n        self.iter = np.asarray(arr).flat\n\n    def __next__(self):\n        return (self.iter.coords, next(self.iter))\n\n    def __iter__(self):\n        return self",
    "docstring": "Multidimensional index iterator. Return an iterator yielding pairs of array coordinates and values. Parameters ---------- arr : ndarray Input array. See Also -------- ndindex, flatiter Examples -------- >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print(index, x) (0, 0) 1 (0, 1) 2 (1, 0) 3 (1, 1) 4",
    "type": "class",
    "file_path": "numpy\\numpy\\lib\\_index_tricks_impl.py",
    "ast_data": "ClassDef name:ndenumerate FunctionDef name:__init__ arg:self arg:arr arguments arg arg Assign Call FunctionDef name:__next__ arg:self arguments arg Return return:yes Call FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "trimmed_std",
    "source_code": "def trimmed_std(a, limits=(0.1, 0.1), inclusive=(1, 1), relative=True, axis=None, ddof=0):\n    if not isinstance(limits, tuple) and isinstance(limits, float):\n        limits = (limits, limits)\n    if relative:\n        out = trimr(a, limits=limits, inclusive=inclusive, axis=axis)\n    else:\n        out = trima(a, limits=limits, inclusive=inclusive)\n    return out.std(axis=axis, ddof=ddof)",
    "docstring": "Returns the trimmed standard deviation of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance.",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:trimmed_std arg:a arg:limits arg:inclusive arg:relative arg:axis arg:ddof arguments arg arg arg arg arg arg If BoolOp Call Call Assign If Assign Call Assign Call Return return:yes Call"
  },
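Public usage goes through scipy.stats.mstats; with limits=(0.1, 0.1) the lowest and highest 10% of points are trimmed before the standard deviation is taken:

import numpy as np
from scipy.stats import mstats

x = np.arange(1.0, 11.0)  # 1.0 .. 10.0
s = mstats.trimmed_std(x, limits=(0.1, 0.1), ddof=1)
# Equivalent to trimming one point per tail, then taking .std(ddof=1).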
  {
    "library": "tensorflow",
    "name": "on_predict_batch_end",
    "source_code": "def on_predict_batch_end(self, batch, logs=None):\n    if self._should_call_predict_batch_hooks:\n        self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)",
    "docstring": "Calls the methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_predict_batch_end arg:self arg:batch arg:logs arguments arg arg arg If Call"
  },
  {
    "library": "pytorch",
    "name": "post",
    "source_code": "@property\ndef post(self) -> Optional[int]:\n    return self._version.post[1] if self._version.post else None",
    "docstring": "The post-release number of the version. >>> print(Version(\"1.2.3\").post) None >>> Version(\"1.2.3.post1\").post 1",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:post arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "popen",
    "source_code": "def popen(fullcmd):\n    p = subprocess.Popen(fullcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)\n    return p.stdout",
    "docstring": "Invoke a subprocess via :mod:.",
    "type": "function",
    "file_path": "cherrypy\\cherrypy\\_cpmodpy.py",
    "ast_data": "FunctionDef name:popen arg:fullcmd arguments arg Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "print_graph",
    "source_code": "def print_graph(self, *, verbose=True, file=None):\n    print(self.__tx.output.graph.python_code('self', verbose=verbose).src, file=file)",
    "docstring": "Print the partially constructed FX graph that would be passed to the user compiler after compilation.",
    "type": "method",
    "file_path": "pytorch\\torch\\_dynamo\\comptime.py",
    "ast_data": "FunctionDef name:print_graph arg:self arguments arg arg arg Call Call"
  },
  {
    "library": "django",
    "name": "find",
    "source_code": "def find(self, path, find_all=False, **kwargs):\n    if kwargs:\n        find_all = self._check_deprecated_find_param(find_all=find_all, **kwargs)\n    matches = []\n    for prefix, root in self.locations:\n        if root not in searched_locations:\n            searched_locations.append(root)\n        matched_path = self.find_location(root, path, prefix)\n        if matched_path:\n            if not find_all:\n                return matched_path\n            matches.append(matched_path)\n    return matches",
    "docstring": "Look for files in the extra locations as defined in STATICFILES_DIRS.",
    "type": "method",
    "file_path": "django\\django\\contrib\\staticfiles\\finders.py",
    "ast_data": "FunctionDef name:find arg:self arg:path arg:find_all arguments arg arg arg arg If Assign Call Assign For If Compare Call Assign Call If If Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_attribute_type_compatible_with_arg",
    "source_code": "def _attribute_type_compatible_with_arg(attr: _schemas.AttributeParameter, value: ir.Value | int | float | bool | Sequence[int] | Sequence[float] | None) -> bool:\n    if isinstance(value, bool):\n        return attr.type is ir.AttributeType.INT\n    if isinstance(value, str):\n        return attr.type is ir.AttributeType.STRING\n    if isinstance(value, int):\n        return attr.type in {ir.AttributeType.INT, ir.AttributeType.FLOAT}\n    if isinstance(value, float):\n        return attr.type is ir.AttributeType.FLOAT\n    if isinstance(value, complex):\n        return False\n    if isinstance(value, Sequence):\n        if attr.type is ir.AttributeType.INTS:\n            return all((isinstance(i, int) for i in value))\n        if attr.type is ir.AttributeType.FLOATS:\n            return all((isinstance(i, (int, float)) for i in value))\n    if isinstance(value, torch.dtype):\n        return attr.type is ir.AttributeType.INT\n    if isinstance(value, (torch.device, torch.memory_format, torch.layout)):\n        return attr.type is ir.AttributeType.STRING\n    if value is None and (not attr.required):\n        return True\n    return False",
    "docstring": "Check if the attribute type is compatible with the argument.",
    "type": "function",
    "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_dispatching.py",
    "ast_data": "FunctionDef name:_attribute_type_compatible_with_arg arg:attr arg:value arguments arg arg If Call Return return:yes Compare If Call Return return:yes Compare If Call Return return:yes Compare If Call Return return:yes Compare If Call Return return:yes If Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call If Call Return return:yes Compare If Call Return return:yes Compare If BoolOp Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "avg_pool3d",
    "source_code": "def avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):\n    if not input.is_quantized:\n        raise ValueError(\"Input to 'quantized.avg_pool3d' must be quantized!\")\n    return torch.nn.functional.avg_pool3d(input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override)",
    "docstring": "Applies 3D average-pooling operation in :math: regions by step size :math: steps. The number of output features is equal to the number of input planes. .. note:: The input quantization parameters propagate to the output. Args: input: quantized input tensor :math: kernel_size: size of the pooling region. Can be a single number or a tuple stride: stride of the pooling operation. Can be a single number or a tuple . Default: :attr: padding: implicit zero paddings on both sides of the input. Can be a single number or a tuple . Default: 0 ceil_mode: when True, will use instead of in the formula to compute the output shape. Default: `` divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used. Default: None",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py",
    "ast_data": "FunctionDef name:avg_pool3d arg:input arg:kernel_size arg:stride arg:padding arg:ceil_mode arg:count_include_pad arg:divisor_override arguments arg arg arg arg arg arg arg If Raise Call Return return:yes Call"
  },
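A minimal usage sketch: the input must be quantized first, and per the note above the quantization parameters carry over to the output (shapes and scale are illustrative):

import torch
from torch.ao.nn.quantized import functional as qF

x = torch.randn(1, 2, 4, 4, 4)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
out = qF.avg_pool3d(qx, kernel_size=2, stride=2)
print(out.shape)      # torch.Size([1, 2, 2, 2, 2])
print(out.q_scale())  # 0.1 -- the input's quantization parameters propagate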
  {
    "library": "scikit-learn",
    "name": "__sklearn_is_fitted__",
    "source_code": "def __sklearn_is_fitted__(self):\n    return True",
    "docstring": "Return True since FunctionTransfomer is stateless.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_function_transformer.py",
    "ast_data": "FunctionDef name:__sklearn_is_fitted__ arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "inverse",
    "source_code": "def inverse(self) -> So2:\n    return So2(1 / self.z)",
    "docstring": "Return the inverse transformation. Example: >>> s = So2.identity() >>> s.inverse().z Parameter containing: tensor(1.+0.j, requires_grad=True)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py",
    "ast_data": "FunctionDef name:inverse arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "UnitNorm",
    "source_code": "class UnitNorm(Constraint):\n\n    def __init__(self, axis=0):\n        self.axis = axis\n\n    @doc_controls.do_not_generate_docs\n    def __call__(self, w):\n        return w / (backend.epsilon() + backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True)))\n\n    @doc_controls.do_not_generate_docs\n    def get_config(self):\n        return {'axis': self.axis}",
    "docstring": "Constrains the weights incident to each hidden unit to have unit norm. Also available via the shortcut function . Args: axis: integer, axis along which to calculate weight norms. For instance, in a layer the weight matrix has shape , set to to constrain each weight vector of length . In a layer with , the weight tensor has shape , set to to constrain the weights of each filter tensor of size .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "ClassDef name:UnitNorm FunctionDef name:__init__ arg:self arg:axis arguments arg arg Assign FunctionDef name:__call__ arg:self arg:w arguments arg arg Return return:yes Call Call Call Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
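A plain-NumPy sketch of the arithmetic in __call__: dividing by the per-axis L2 norm (plus a small epsilon) leaves each weight vector with unit norm.

import numpy as np

w = np.random.randn(4, 3)
eps = 1e-7  # stand-in for backend.epsilon()
w_unit = w / (eps + np.sqrt(np.sum(np.square(w), axis=0, keepdims=True)))
print(np.linalg.norm(w_unit, axis=0))  # approximately [1. 1. 1.]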
  {
    "library": "pandas",
    "name": "_get_codes_for_sorting",
    "source_code": "def _get_codes_for_sorting(self) -> list[Categorical]:\n\n    def cats(level_codes: np.ndarray) -> np.ndarray:\n        return np.arange(level_codes.max() + 1 if len(level_codes) else 0, dtype=level_codes.dtype)\n    return [Categorical.from_codes(level_codes, cats(level_codes), True, validate=False) for level_codes in self.codes]",
    "docstring": "we are categorizing our codes by using the available categories (all, not just observed) excluding any missing ones (-1); this is in preparation for sorting, where we need to disambiguate that -1 is not a valid valid",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:_get_codes_for_sorting arg:self arguments arg FunctionDef name:cats arg:level_codes arguments arg Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "to_dict",
    "source_code": "def to_dict(self, *, into: type[MutableMappingT] | MutableMappingT=dict) -> MutableMappingT:\n    into_c = com.standardize_mapping(into)\n    if is_object_dtype(self.dtype) or isinstance(self.dtype, ExtensionDtype):\n        return into_c(((k, maybe_box_native(v)) for k, v in self.items()))\n    else:\n        return into_c(self.items())",
    "docstring": "Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.MutableMapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.MutableMapping Key-value representation of Series. See Also -------- Series.to_list: Converts Series to a list of the values. Series.to_numpy: Converts Series to NumPy ndarray. Series.array: ExtensionArray of the data backing this Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(into=OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(into=dd) defaultdict(, {0: 1, 1: 2, 2: 3, 3: 4})",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:to_dict arg:self arguments arg arg Assign Call If BoolOp Call Call Return return:yes Call Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "pointbiserialr",
    "source_code": "def pointbiserialr(x, y):\n    x = ma.fix_invalid(x, copy=True).astype(bool)\n    y = ma.fix_invalid(y, copy=True).astype(float)\n    m = ma.mask_or(ma.getmask(x), ma.getmask(y))\n    if m is not nomask:\n        unmask = np.logical_not(m)\n        x = x[unmask]\n        y = y[unmask]\n    n = len(x)\n    phat = x.sum() / float(n)\n    y0 = y[~x]\n    y1 = y[x]\n    y0m = y0.mean()\n    y1m = y1.mean()\n    rpb = (y1m - y0m) * np.sqrt(phat * (1 - phat)) / y.std()\n    df = n - 2\n    t = rpb * ma.sqrt(df / (1.0 - rpb ** 2))\n    prob = _betai(0.5 * df, 0.5, df / (df + t * t))\n    return PointbiserialrResult(rpb, prob)",
    "docstring": "Calculates a point biserial correlation coefficient and its p-value. Parameters ---------- x : array_like of bools Input array. y : array_like Input array. Returns ------- correlation : float R value pvalue : float 2-tailed p-value Notes ----- Missing values are considered pair-wise: if a value is missing in x, the corresponding value in y is masked. For more details on , see .",
    "type": "function",
    "file_path": "scipy\\scipy\\stats\\_mstats_basic.py",
    "ast_data": "FunctionDef name:pointbiserialr arg:x arg:y arguments arg arg Assign Call Call Assign Call Call Assign Call Call Call If Compare Assign Call Assign Assign Assign Call Assign Call Call Assign Assign Assign Call Assign Call Assign Call Call Assign Assign Call Assign Call Return return:yes Call"
  },
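A short usage sketch of the pair-wise masking described in the Notes: the NaN in y removes the corresponding x entry before the correlation is computed (data are illustrative).

import numpy as np
from scipy.stats.mstats import pointbiserialr

x = np.array([0, 1, 1, 0, 1, 0, 1, 0], dtype=bool)
y = np.array([1.0, 2.5, 2.1, 0.8, np.nan, 1.1, 2.8, 0.9])
res = pointbiserialr(x, y)  # the (x[4], y[4]) pair is masked out
print(res.correlation, res.pvalue)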
  {
    "library": "django",
    "name": "kml",
    "source_code": "@property\ndef kml(self):\n    if self.hasz:\n        substr = '%s,%s,%s '\n    else:\n        substr = '%s,%s,0 '\n    return '<coordinates>%s</coordinates>' % ''.join((substr % self[i] for i in range(len(self)))).strip()",
    "docstring": "Return the KML representation for the coordinates.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:kml arg:self arguments arg If Assign Assign Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, cell, monitored_section_name=None, avoid_repetitive_counting=False):\n    self.cell = cell\n    self.monitored_section_name = monitored_section_name\n    self._avoid_repetitive_counting = avoid_repetitive_counting\n    self._counting = True",
    "docstring": "Creates a new MonitoredTimer. Args: cell: the cell associated with the time metric that will be inremented. monitored_section_name: name of action being monitored here. avoid_repetitive_counting: when set to True, if already in a monitored timer section with the same monitored_section_name, skip counting.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:cell arg:monitored_section_name arg:avoid_repetitive_counting arguments arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "output_shape",
    "source_code": "@property\n@doc_controls.do_not_doc_inheritable\ndef output_shape(self):\n    if not self._inbound_nodes:\n        raise AttributeError('The layer has never been called and thus has no defined output shape.')\n    all_output_shapes = set([str(node.output_shapes) for node in self._inbound_nodes])\n    if len(all_output_shapes) == 1:\n        return self._inbound_nodes[0].output_shapes\n    else:\n        raise AttributeError('The layer \"%s\" has multiple inbound nodes, with different output shapes. Hence the notion of \"output shape\" is ill-defined for the layer. Use `get_output_shape_at(node_index)` instead.' % self.name)",
    "docstring": "Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py",
    "ast_data": "FunctionDef name:output_shape arg:self arguments arg If Raise Call Assign Call Call If Compare Call Return return:yes Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "executing_eagerly_outside_functions",
    "source_code": "@tf_export(v1=['executing_eagerly_outside_functions'])\ndef executing_eagerly_outside_functions() -> bool:\n    if context.executing_eagerly():\n        return True\n    else:\n        outer_context, _ = _get_outer_context_and_inner_device_stack()\n        with outer_context():\n            return context.executing_eagerly()",
    "docstring": "Returns True if executing eagerly, even if inside a graph function. This function will check the outermost context for the program and see if it is in eager mode. It is useful comparing to , which checks the current context and will return within a body. It can be used to build library that behave differently in eager runtime and v1 session runtime (deprecated). Example: >>> tf.compat.v1.enable_eager_execution() >>> @tf.function ... def func(): ... # A function constructs TensorFlow graphs, it does not execute eagerly, ... # but the outer most context is still eager. ... assert not tf.executing_eagerly() ... return tf.compat.v1.executing_eagerly_outside_functions() >>> func() Returns: boolean, whether the outermost context is in eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:executing_eagerly_outside_functions arguments If Call Return return:yes Assign Call With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "find_nearest_contour",
    "source_code": "def find_nearest_contour(self, x, y, indices=None, pixel=True):\n    segment = index = d2 = None\n    with ExitStack() as stack:\n        if not pixel:\n            stack.enter_context(self._cm_set(transform=mtransforms.IdentityTransform()))\n        i_level, i_vtx, (xmin, ymin) = self._find_nearest_contour((x, y), indices)\n    if i_level is not None:\n        cc_cumlens = np.cumsum([*map(len, self._paths[i_level]._iter_connected_components())])\n        segment = cc_cumlens.searchsorted(i_vtx, 'right')\n        index = i_vtx if segment == 0 else i_vtx - cc_cumlens[segment - 1]\n        d2 = (xmin - x) ** 2 + (ymin - y) ** 2\n    return (i_level, segment, index, xmin, ymin, d2)",
    "docstring": "Find the point in the contour plot that is closest to ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:find_nearest_contour arg:self arg:x arg:y arg:indices arg:pixel arguments arg arg arg arg arg Assign With Call If Call Call Call Assign Call If Compare Assign Call Call Call Assign Call Assign Compare Assign Return return:yes"
  },
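A hedged usage sketch on synthetic data; with pixel=False the query point is interpreted in data coordinates rather than screen pixels.

import numpy as np
import matplotlib.pyplot as plt

X, Y = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
cs = plt.contour(X, Y, X**2 + Y**2, levels=[1, 4, 9])
i_level, segment, index, xmin, ymin, d2 = cs.find_nearest_contour(1.1, 0.0, pixel=False)
print(cs.levels[i_level], (xmin, ymin), d2)  # nearest level, contour point, squared distance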
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, sess):\n    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))\n    self.session = sess",
    "docstring": "Constructor. Args: sess: A tensorflow Session object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:sess arguments arg arg Call Assign"
  },
  {
    "library": "sphinx",
    "name": "_copy_times",
    "source_code": "def _copy_times(source: str | os.PathLike[str], dest: str | os.PathLike[str]) -> None:\n    st = source.stat() if isinstance(source, os.DirEntry) else os.stat(source)\n    os.utime(dest, ns=(st.st_atime_ns, st.st_mtime_ns))",
    "docstring": "Copy a file's modification times.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\osutil.py",
    "ast_data": "FunctionDef name:_copy_times arg:source arg:dest arguments arg arg Assign Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, ps_tasks, ps_device, worker_device, merge_devices, ps_ops, ps_strategy):\n    self._ps_tasks = ps_tasks\n    self._ps_device = ps_device\n    self._worker_device = worker_device\n    self._merge_devices = merge_devices\n    self._ps_ops = ps_ops\n    self._ps_strategy = ps_strategy",
    "docstring": "Create a new . Args: ps_tasks: Number of tasks in the job. ps_device: String. Name of the job. worker_device: String. Name of the job. merge_devices: Boolean. Set to True to allow merging of device specs. ps_ops: List of strings representing types that need to be placed on devices. ps_strategy: A callable invoked for every ps (i.e. matched by ), that takes the and returns the ps task index to use.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\device_setter.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:ps_tasks arg:ps_device arg:worker_device arg:merge_devices arg:ps_ops arg:ps_strategy arguments arg arg arg arg arg arg arg Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "_init",
    "source_code": "def _init(self):\n    self.label.set(x=0.5, y=0, verticalalignment='top', horizontalalignment='center', transform=mtransforms.blended_transform_factory(self.axes.transAxes, mtransforms.IdentityTransform()))\n    self.label_position = 'bottom'\n    if mpl.rcParams['xtick.labelcolor'] == 'inherit':\n        tick_color = mpl.rcParams['xtick.color']\n    else:\n        tick_color = mpl.rcParams['xtick.labelcolor']\n    self.offsetText.set(x=1, y=0, verticalalignment='top', horizontalalignment='right', transform=mtransforms.blended_transform_factory(self.axes.transAxes, mtransforms.IdentityTransform()), fontsize=mpl.rcParams['xtick.labelsize'], color=tick_color)\n    self.offset_text_position = 'bottom'",
    "docstring": "Initialize the label and offsetText instance values and / .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axis.py",
    "ast_data": "FunctionDef name:_init arg:self arguments arg Call Call Call Assign If Compare Assign Assign Call Call Call Assign"
  },
  {
    "library": "django",
    "name": "_join_route",
    "source_code": "@staticmethod\ndef _join_route(route1, route2):\n    if not route1:\n        return route2\n    route2 = route2.removeprefix('^')\n    return route1 + route2",
    "docstring": "Join two routes, without the starting ^ in the second route.",
    "type": "method",
    "file_path": "django\\django\\urls\\resolvers.py",
    "ast_data": "FunctionDef name:_join_route arg:route1 arg:route2 arguments arg arg If Return return:yes Assign Call Return return:yes"
  },
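The joining rule, illustrated as a doctest against the staticmethod above (the anchoring '^' survives only when the first route is empty):

>>> _join_route('admin/', '^users/')
'admin/users/'
>>> _join_route('', '^users/')
'^users/'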
  {
    "library": "kornia",
    "name": "fx",
    "source_code": "@property\ndef fx(self) -> Tensor:\n    return self.intrinsics[..., 0, 0]",
    "docstring": "Return the focal length in the x-direction. Returns: tensor of shape :math:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py",
    "ast_data": "FunctionDef name:fx arg:self arguments arg Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "cleanup",
    "source_code": "def cleanup(self) -> None:\n    pass",
    "docstring": "Cleanup any resources. The default implementation does nothing.",
    "type": "method",
    "file_path": "sphinx\\sphinx\\builders\\__init__.py",
    "ast_data": "FunctionDef name:cleanup arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_simple_reduce",
    "source_code": "def _simple_reduce(per_replica_value, reduce_to_device, accumulation_fn, reduce_op):\n    all_values = per_replica_value.values\n    if not all_values:\n        raise ValueError('`per_replica_value` must be non-empty')\n    count = len(all_values)\n    with ops.device(reduce_to_device):\n        with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n            reduced = cross_device_utils.aggregate_tensors_or_indexed_slices(all_values, accumulation_fn)\n            if reduce_op == reduce_util.ReduceOp.MEAN:\n                reduced = cross_device_utils.divide_by_n_tensors_or_indexed_slices(reduced, count)\n            elif reduce_op != reduce_util.ReduceOp.SUM:\n                raise ValueError('`reduce_op` must be Reduce.SUM or Reduce.MEAN.')\n    return reduced",
    "docstring": "Reduces the value by accumulation_fn and reduce_op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_simple_reduce arg:per_replica_value arg:reduce_to_device arg:accumulation_fn arg:reduce_op arguments arg arg arg arg Assign If Raise Call Assign Call With Call With Call Assign Call If Compare Assign Call If Compare Raise Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_modules_to_backward_prefetch",
    "source_code": "def set_modules_to_backward_prefetch(self, modules: list[FSDPModule]) -> None:\n    _assert_all_fsdp_modules(modules)\n    self._get_fsdp_state()._states_to_backward_prefetch = [module._get_fsdp_state() for module in modules]",
    "docstring": "Sets the FSDP modules for which this FSDP module should explicitly prefetch all-gathers in backward. This overrides the default backward pretching implementation that prefetches the next FSDP module based on the reverse post-forward order. Passing a singleton list containing the previous FSDP module gives the same all-gather overlap behavior as the default overlap behavior. Passing a list with at least length two is required for more aggressive overlap and will use more reserved memory. Args: modules (List[FSDPModule]): FSDP modules to prefetch.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py",
    "ast_data": "FunctionDef name:set_modules_to_backward_prefetch arg:self arg:modules arguments arg arg Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_adjust_managed_modules",
    "source_code": "def _adjust_managed_modules(modules: list[nn.Module], ignored_params: set[nn.Parameter]) -> list[nn.Module]:\n    ignore_decision: dict[nn.Module, bool] = {}\n    new_modules = []\n    for module in modules:\n        ignored = _ignore_module(module, ignored_params, ignore_decision)\n        if not ignored:\n            new_modules.append(module)\n    return new_modules",
    "docstring": "Adjust the given list of managed modules by removing those with all parameters ignored.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_init.py",
    "ast_data": "FunctionDef name:_adjust_managed_modules arg:modules arg:ignored_params arguments arg arg Assign For Assign Call If Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "get_laf_orientation",
    "source_code": "def get_laf_orientation(LAF: Tensor) -> Tensor:\n    KORNIA_CHECK_LAF(LAF)\n    angle_rad = torch.atan2(LAF[..., 0, 1], LAF[..., 0, 0])\n    return rad2deg(angle_rad).unsqueeze(-1)",
    "docstring": "Return orientation of the LAFs, in degrees. Args: LAF: :math: Returns: angle in degrees :math: Example: >>> input = torch.ones(1, 5, 2, 3) # BxNx2x3 >>> output = get_laf_orientation(input) # BxNx1",
    "type": "function",
    "file_path": "kornia\\kornia\\feature\\laf.py",
    "ast_data": "FunctionDef name:get_laf_orientation arg:LAF arguments arg Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "getmodule",
    "source_code": "def getmodule(object):\n    return _inspect.getmodule(object)",
    "docstring": "TFDecorator-aware replacement for inspect.getmodule.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py",
    "ast_data": "FunctionDef name:getmodule arg:object arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_patch_optimizer_state_dict",
    "source_code": "@no_type_check\ndef _patch_optimizer_state_dict(model: nn.Module, *, optimizers: tuple[torch.optim.Optimizer, ...], options: Optional[StateDictOptions]=None) -> None:\n    _state_dict_call = functools.partial(get_optimizer_state_dict, model=model, optimizers=optimizers, options=options)\n\n    def state_dict_call():\n        return _state_dict_call()\n    _load_state_dict_call = functools.partial(set_optimizer_state_dict, model=model, optimizers=optimizers, options=options)\n\n    def load_state_dict_call(state_dict: dict[str, Any]):\n        _load_state_dict_call(optim_state_dict=state_dict)\n    _patched_state_dict.add(state_dict_call)\n    _patched_state_dict.add(load_state_dict_call)\n    optimizers = (optimizers,) if isinstance(optimizers, torch.optim.Optimizer) else tuple(optimizers)\n    for optim in optimizers:\n        optim.state_dict = state_dict_call\n        optim.load_state_dict = load_state_dict_call",
    "docstring": "Patch the `StateDictOptions` for the details. Returns: None",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:_patch_optimizer_state_dict arg:model arguments arg arg arg Assign Call FunctionDef name:state_dict_call arguments Return return:yes Call Assign Call FunctionDef name:load_state_dict_call arg:state_dict arguments arg Call Call Call Assign Call Call For Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, **kwargs):\n    super().__init__()\n    global _END_TIME_OF_LAST_WRITE\n    with _END_TIME_OF_LAST_WRITE_LOCK:\n        if _END_TIME_OF_LAST_WRITE is None:\n            _END_TIME_OF_LAST_WRITE = time.time()\n    for k, v in sorted(kwargs.items(), key=lambda item: item[0]):\n        setattr(self, k, v)\n        if not isinstance(getattr(self, k), (base.Trackable, def_function.Function)):\n            raise ValueError(f'`Checkpoint` was expecting a trackable object (an object derived from `Trackable`), got {v}. If you believe this object should be trackable (i.e. it is part of the TensorFlow Python API and manages state), please open an issue.')\n    self._save_counter = None\n    self._save_assign_op = None\n    self._saver = TrackableSaver(graph_view_lib.ObjectGraphView(self))",
    "docstring": "Group objects into a training checkpoint. Args: **kwargs: Keyword arguments are set as attributes of this object, and are saved with the checkpoint. Values must be trackable objects. Raises: ValueError: If objects in are not trackable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:__init__ arg:self arguments arg arg Call Call With If Compare Assign Call For Call Call arguments arg Call If Call Call Raise Call Assign Assign Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_verify_setup",
    "source_code": "def _verify_setup(self):\n    if not self._is_chief:\n        for op in self._graph.get_operations():\n            if op.type in ['Variable', 'VariableV2'] and (not op.device):\n                raise ValueError('When using replicas, all Variables must have their device set: %s' % op)",
    "docstring": "Check that all is good. Raises: ValueError: If something is not good.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py",
    "ast_data": "FunctionDef name:_verify_setup arg:self arguments arg If For Call If BoolOp Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_tensors_in_key_list",
    "source_code": "def _tensors_in_key_list(key_list):\n    if isinstance(key_list, tensor_lib.Tensor):\n        yield key_list\n    if isinstance(key_list, (list, tuple)):\n        for v in key_list:\n            for tensor in _tensors_in_key_list(v):\n                yield tensor\n    if isinstance(key_list, slice):\n        for tensor in _tensors_in_key_list(key_list.start):\n            yield tensor\n        for tensor in _tensors_in_key_list(key_list.stop):\n            yield tensor\n        for tensor in _tensors_in_key_list(key_list.step):\n            yield tensor",
    "docstring": "Generates all Tensors in the given slice spec.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_getitem.py",
    "ast_data": "FunctionDef name:_tensors_in_key_list arg:key_list arguments arg If Call If Call For For Call If Call For Call For Call For Call"
  },
  {
    "library": "numpy",
    "name": "_NoValueType",
    "source_code": "class _NoValueType:\n    __instance = None\n\n    def __new__(cls):\n        if not cls.__instance:\n            cls.__instance = super().__new__(cls)\n        return cls.__instance\n\n    def __repr__(self):\n        return '<no value>'",
    "docstring": "Special keyword value. The instance of this class may be used as the default value assigned to a keyword if no other obvious default (e.g., ) is suitable, Common reasons for using this keyword are: - A new keyword is added to a function, and that function forwards its inputs to another function or method which can be defined outside of NumPy. For example, `` unconditionally could have broken previously working code. - A keyword is being deprecated, and a deprecation warning must only be emitted when the keyword is used.",
    "type": "class",
    "file_path": "numpy\\numpy\\_globals.py",
    "ast_data": "ClassDef name:_NoValueType Assign FunctionDef name:__new__ arg:cls arguments arg If Assign Call Call Return return:yes FunctionDef name:__repr__ arg:self arguments arg Return return:yes"
  },
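A sketch of the sentinel-default pattern this class enables; my_std is a hypothetical wrapper, not a NumPy API, showing how the singleton distinguishes "keyword not passed" from any real value, including None.

import numpy as np

def my_std(a, keepdims=np._NoValue):
    if keepdims is np._NoValue:
        return np.std(a)  # keyword not supplied: don't forward it
    return np.std(a, keepdims=keepdims)

print(my_std([1.0, 2.0, 3.0]))
print(repr(np._NoValue))  # <no value>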
  {
    "library": "pytorch",
    "name": "row_or_column_stride",
    "source_code": "def row_or_column_stride(self, node: IRNode, default_value: int=0) -> str:\n    if node is None or len(node.get_stride()) < 2:\n        return str(default_value)\n    stride0 = node.get_stride()[-1]\n    stride1 = node.get_stride()[-2]\n    if stride0 == 1:\n        return cexpr(self.rename_indexing(stride1))\n    elif stride1 == 1:\n        return cexpr(self.rename_indexing(stride0))\n    else:\n        raise RuntimeError(f'At least 1 stride should be 1. Strides: node.get_stride()={node.get_stride()!r}')",
    "docstring": "Hook called from template code to get the row or column stride of an arg. This is required by some CUTLASS 2.X APIs. If the node is in row_major, it returns stride[-2]. If the node is in column_major, it returns stride[-1]. TODO: Will add needed args to pass it in if it is dynamic.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py",
    "ast_data": "FunctionDef name:row_or_column_stride arg:self arg:node arg:default_value arguments arg arg arg If BoolOp Compare Compare Call Call Return return:yes Call Assign Call Assign Call If Compare Return return:yes Call Call If Compare Return return:yes Call Call Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_under_xla_context",
    "source_code": "def _is_under_xla_context():\n    g = ops.get_default_graph()\n    while g is not None:\n        control_flow_context = g._get_control_flow_context()\n        while control_flow_context is not None:\n            if control_flow_context.IsXLAContext():\n                return True\n            else:\n                control_flow_context = control_flow_context.outer_context\n        g = getattr(g, 'outer_graph', None)\n    return False",
    "docstring": "Check if we are currently inside an XLA compile context.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\control_flow_ops.py",
    "ast_data": "FunctionDef name:_is_under_xla_context arguments Assign Call While Compare Assign Call While Compare If Call Return return:yes Assign Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "get_next_year",
    "source_code": "def get_next_year(self, date):\n    return _get_next_prev(self, date, is_previous=False, period='year')",
    "docstring": "Get the next valid year.",
    "type": "method",
    "file_path": "django\\django\\views\\generic\\dates.py",
    "ast_data": "FunctionDef name:get_next_year arg:self arg:date arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_sparse_semi_structured_tile",
    "source_code": "def _sparse_semi_structured_tile(dense):\n\n    def greedy_prune_tile(tile):\n        num_kept_row = [0, 0, 0, 0]\n        num_kept_col = [0, 0, 0, 0]\n        for x in tile.flatten().sort(descending=True, stable=True).indices:\n            r, c = (x // 4, x % 4)\n            if num_kept_row[r] < 2 and num_kept_col[c] < 2:\n                num_kept_row[r] += 1\n                num_kept_col[c] += 1\n            else:\n                tile[r, c] = 0\n    for batch in dense.unfold(0, 4, 4).unfold(1, 4, 4):\n        for tile in batch:\n            greedy_prune_tile(tile)\n    return dense",
    "docstring": "This function computes a 2:4 sparse tile by greedily taking the largest values. Since we take the largest values greedily, how the sorting algorithm handles duplicates affects the ultimate sparsity pattern. Note that this function does not have the same sorting semantics as our CUDA backend, which is exposed via and thus returns a different pattern.",
    "type": "function",
    "file_path": "pytorch\\torch\\sparse\\_semi_structured_conversions.py",
    "ast_data": "FunctionDef name:_sparse_semi_structured_tile arg:dense arguments arg FunctionDef name:greedy_prune_tile arg:tile arguments arg Assign Assign For Call Call Assign If BoolOp Compare Compare Assign For Call Call For Call Return return:yes"
  },
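A hedged check of the resulting pattern, assuming the private import path matches the file above: after pruning, every 4x4 tile keeps exactly two entries per row and per column.

import torch
from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile

dense = torch.randn(8, 8).abs() + 0.1          # strictly nonzero, so zeros mean pruned
pruned = _sparse_semi_structured_tile(dense)   # prunes in place and returns dense
tile = pruned[:4, :4]
print((tile != 0).sum(dim=0))  # tensor([2, 2, 2, 2])
print((tile != 0).sum(dim=1))  # tensor([2, 2, 2, 2])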
  {
    "library": "pytorch",
    "name": "clip_grad_norm_",
    "source_code": "@_no_grad\ndef clip_grad_norm_(parameters: _tensor_or_tensors, max_norm: float, norm_type: float=2.0, error_if_nonfinite: bool=False, foreach: Optional[bool]=None) -> torch.Tensor:\n    if isinstance(parameters, torch.Tensor):\n        parameters = [parameters]\n    else:\n        is_generator = isinstance(parameters, types.GeneratorType)\n        parameters = list(parameters)\n        if is_generator and len(parameters) == 0:\n            warnings.warn('`parameters` is an empty generator, no gradient clipping will occur.', stacklevel=3)\n    grads = [p.grad for p in parameters if p.grad is not None]\n    total_norm = _get_total_norm(grads, norm_type, error_if_nonfinite, foreach)\n    _clip_grads_with_norm_(parameters, max_norm, total_norm, foreach)\n    return total_norm",
    "docstring": "Clip the gradient norm of an iterable of parameters. The norm is computed over the norms of the individual gradients of all parameters, as if the norms of the individual gradients were concatenated into a single vector. Gradients are modified in-place. This function is equivalent to :func: followed by :func: with the `parameters` Returns: Total norm of the parameter gradients (viewed as a single vector).",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\utils\\clip_grad.py",
    "ast_data": "FunctionDef name:clip_grad_norm_ arg:parameters arg:max_norm arg:norm_type arg:error_if_nonfinite arg:foreach arguments arg arg arg arg arg If Call Assign Assign Call Assign Call If BoolOp Compare Call Call Assign Compare Assign Call Call Return return:yes"
  },
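The canonical call site, as a sketch: clip after backward and before the optimizer step; the returned value is the total norm measured before clipping.

import torch
from torch import nn

model = nn.Linear(10, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss = model(torch.randn(4, 10)).pow(2).mean()
loss.backward()
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
opt.step()
print(total_norm)  # pre-clipping norm, as a tensor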
  {
    "library": "django",
    "name": "get_form",
    "source_code": "def get_form(self, request, obj=None, **kwargs):\n    defaults = {}\n    if obj is None:\n        defaults['form'] = self.add_form\n    defaults.update(kwargs)\n    return super().get_form(request, obj, **defaults)",
    "docstring": "Use special form during user creation",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\admin.py",
    "ast_data": "FunctionDef name:get_form arg:self arg:request arg:obj arguments arg arg arg arg Assign If Compare Assign Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "build_project",
    "source_code": "@classmethod\ndef build_project(cls, dirs, args, env):\n    cmd = ['ninja', '-C', str(dirs.build)]\n    if args.parallel is None:\n        n_cores = cpu_count(only_physical_cores=True)\n        cmd += [f'-j{n_cores}']\n    else:\n        cmd += ['-j', str(args.parallel)]\n    cmd_str = ' '.join([str(p) for p in cmd])\n    cls.console.print(f'{EMOJI.cmd} [cmd] {cmd_str}')\n    ret = subprocess.call(cmd, env=env, cwd=dirs.root)\n    if ret == 0:\n        print('Build OK')\n    else:\n        print('Build failed!')\n        sys.exit(1)",
    "docstring": "Build a dev version of the project.",
    "type": "method",
    "file_path": "scipy\\dev.py",
    "ast_data": "FunctionDef name:build_project arg:cls arg:dirs arg:args arg:env arguments arg arg arg arg Assign Call If Compare Assign Call Call Assign Call Call Call Assign Call If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._dtype",
    "docstring": "The of s handled by this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "buffer_read_counts",
    "source_code": "@cache_on_self\ndef buffer_read_counts(self) -> dict[str, int]:\n    read_counts: dict[str, int] = collections.defaultdict(int)\n    for node in self.scheduler_nodes():\n        for read_dep in node.read_writes.reads:\n            read_counts[read_dep.name] += 1\n    return dict(read_counts)",
    "docstring": "Counts how many times each buffer is read within the kernel",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\simd_kernel_features.py",
    "ast_data": "FunctionDef name:buffer_read_counts arg:self arguments arg Call For Call For Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "outputs",
    "source_code": "@property\ndef outputs(self):\n    return self._func_graph.outputs",
    "docstring": "Returns tensors in corresponding to returned tensors.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py",
    "ast_data": "FunctionDef name:outputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "AttributeMutation",
    "source_code": "class AttributeMutation(MutationType):\n\n    def __init__(self, typ: SourceType):\n        super().__init__(typ)",
    "docstring": "This case of VariableTracker.mutation_type marker indicates that Dynamo allows mutation on the value's attributes.",
    "type": "class",
    "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py",
    "ast_data": "ClassDef name:AttributeMutation FunctionDef name:__init__ arg:self arg:typ arguments arg arg Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_graph_execution_function",
    "source_code": "def _make_graph_execution_function(model, mode):\n\n    def _per_replica_function(model):\n        f = model._make_execution_function(mode)\n        return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)\n    strategy = model._distribution_strategy\n    with strategy.scope():\n        grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args = strategy.extended.call_for_each_replica(_per_replica_function, args=(get_distributed_model(model, mode),))\n        init_restore_or_wait_for_variables()\n        all_inputs, all_outputs, all_updates, all_session_args = unwrap_values(strategy, grouped_inputs, grouped_outputs, grouped_updates, grouped_session_args, with_loss_tensor=mode != ModeKeys.PREDICT)\n        return backend.function(all_inputs, all_outputs, updates=all_updates, name='distributed_{}_function'.format(mode), **all_session_args)",
    "docstring": "Makes function to run one step of distributed model in graph mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py",
    "ast_data": "FunctionDef name:_make_graph_execution_function arg:model arg:mode arguments arg arg FunctionDef name:_per_replica_function arg:model arguments arg Assign Call Return return:yes Assign With Call Assign Call Call Call Assign Call Compare Return return:yes Call Call"
  },
  {
    "library": "cryptography",
    "name": "private_bytes_raw",
    "source_code": "@abc.abstractmethod\ndef private_bytes_raw(self) -> bytes:\n    pass",
    "docstring": "The raw bytes of the private key. Equivalent to private_bytes(Raw, Raw, NoEncryption()).",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\x448.py",
    "ast_data": "FunctionDef name:private_bytes_raw arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_getitem",
    "source_code": "def _getitem(self, slice_spec):\n    if isinstance(slice_spec, bool) or (isinstance(slice_spec, core_tf_types.Tensor) and slice_spec.dtype == dtypes.bool) or (isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and slice_spec.dtype == np.bool_):\n        return array_ops.boolean_mask(tensor=self, mask=slice_spec)\n    if not isinstance(slice_spec, tuple):\n        slice_spec = _as_spec_tuple(slice_spec)\n    result_t = _slice_helper(self, slice_spec)\n    return result_t",
    "docstring": "Implementation of ndarray.__getitem__.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py",
    "ast_data": "FunctionDef name:_getitem arg:self arg:slice_spec arguments arg arg If BoolOp Call BoolOp Call Compare BoolOp Call Compare Return return:yes Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "PolarAffine",
    "source_code": "class PolarAffine(mtransforms.Affine2DBase):\n\n    def __init__(self, scale_transform, limits):\n        super().__init__()\n        self._scale_transform = scale_transform\n        self._limits = limits\n        self.set_children(scale_transform, limits)\n        self._mtx = None\n    __str__ = mtransforms._make_str_method('_scale_transform', '_limits')\n\n    def get_matrix(self):\n        if self._invalid:\n            limits_scaled = self._limits.transformed(self._scale_transform)\n            yscale = limits_scaled.ymax - limits_scaled.ymin\n            affine = mtransforms.Affine2D().scale(0.5 / yscale).translate(0.5, 0.5)\n            self._mtx = affine.get_matrix()\n            self._inverted = None\n            self._invalid = 0\n        return self._mtx",
    "docstring": "The affine part of the polar projection. Scales the output so that maximum radius rests on the edge of the Axes circle and the origin is mapped to (0.5, 0.5). The transform applied is the same to x and y components and given by: .. math:: x_{1} = 0.5 \\left [ \\frac{x_{0}}{(r_{\\max} - r_{\\min})} + 1 \\right ] :math: are the minimum and maximum radial limits after any scaling (e.g. log scaling) has been removed.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "ClassDef name:PolarAffine FunctionDef name:__init__ arg:self arg:scale_transform arg:limits arguments arg arg arg Call Call Assign Assign Call Assign Assign Call FunctionDef name:get_matrix arg:self arguments arg If Assign Call Assign Assign Call Call Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "predict_classes",
    "source_code": "def predict_classes(self, x, batch_size=32, verbose=0):\n    warnings.warn('`model.predict_classes()` is deprecated and will be removed after 2021-01-01. Please use instead:* `np.argmax(model.predict(x), axis=-1)`,   if your model does multi-class classification   (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype(\"int32\")`,   if your model does binary classification   (e.g. if it uses a `sigmoid` last-layer activation).')\n    proba = self.predict(x, batch_size=batch_size, verbose=verbose)\n    if proba.shape[-1] > 1:\n        return proba.argmax(axis=-1)\n    else:\n        return (proba > 0.5).astype('int32')",
    "docstring": "Generate class predictions for the input samples. The input samples are processed batch by batch. Args: x: input data, as a Numpy array or list of Numpy arrays (if the model has multiple inputs). batch_size: integer. verbose: verbosity mode, 0 or 1. Returns: A numpy array of class predictions.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\sequential.py",
    "ast_data": "FunctionDef name:predict_classes arg:self arg:x arg:batch_size arg:verbose arguments arg arg arg arg Call Assign Call If Compare Return return:yes Call Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "get_next_as_list",
    "source_code": "def get_next_as_list(self, name=None):\n    del name\n    with ops.device(self._worker):\n        data_list = [self._fn() for _ in self._devices]\n        return data_list",
    "docstring": "Get next element from the callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py",
    "ast_data": "FunctionDef name:get_next_as_list arg:self arg:name arguments arg arg With Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_n_splits",
    "source_code": "def get_n_splits(self, X, y=None, groups=None):\n    if X is None:\n        raise ValueError(\"The 'X' parameter should not be None.\")\n    return _num_samples(X)",
    "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py",
    "ast_data": "FunctionDef name:get_n_splits arg:self arg:X arg:y arg:groups arguments arg arg arg arg If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_optim_state_dict_impl",
    "source_code": "@staticmethod\ndef _optim_state_dict_impl(model: torch.nn.Module, optim: torch.optim.Optimizer, optim_state_dict: dict[str, Any], optim_input: Optional[Union[list[dict[str, Any]], Iterable[torch.nn.Parameter]]]=None, rank0_only: bool=True, full_state_dict: bool=True, group: Optional[dist.ProcessGroup]=None, cpu_offload: bool=True, *, _stacklevel: int=1) -> dict[str, Any]:\n    if full_state_dict:\n        FullyShardedDataParallel._warn_optim_input(optim_input, stacklevel=_stacklevel + 1)\n        using_optim_input = FullyShardedDataParallel._is_using_optim_input(optim_input, optim)\n    else:\n        using_optim_input = False\n        assert optim_input is None and (not rank0_only)\n    use_orig_params = FullyShardedDataParallel.fsdp_modules(model)[0]._use_orig_params\n    assert all((use_orig_params == m._use_orig_params for m in FullyShardedDataParallel.fsdp_modules(model))), 'Not all FSDP modules have the same _use_orig_params value'\n    return _optim_state_dict(model=model, optim=optim, optim_state_dict=optim_state_dict, optim_input=optim_input, rank0_only=rank0_only, shard_state=not full_state_dict, group=group, using_optim_input=using_optim_input, use_orig_params=use_orig_params, cpu_offload=cpu_offload)",
    "docstring": "Transform the state-dict of an optimizer corresponding to a sharded model. This is the internal API that is used by all the optim_state_dict implementations. Given model, optim, the original optim_state_dict, this API removes the FSDP internal information and internal sharding from the optim_state_dict.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:_optim_state_dict_impl arg:model arg:optim arg:optim_state_dict arg:optim_input arg:rank0_only arg:full_state_dict arg:group arg:cpu_offload arguments arg arg arg arg arg arg arg arg arg If Call Assign Call Assign BoolOp Compare Assign Call Call Compare Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_get_pg_default_device",
    "source_code": "def _get_pg_default_device(group: Optional[ProcessGroup]=None) -> torch.device:\n    warnings.warn('`_get_pg_default_device` will be deprecated, it only stays for backward-compatiblity reason. If you need to find a device for object collectives, please use `_get_object_coll_device`. If you need to query the device types supported by group, please use `_device_capability(group)`. ')\n    group = group or _get_default_group()\n    if not isinstance(group, ProcessGroup):\n        warnings.warn(f'You are using a Backend {type(group)} as a ProcessGroup. This usage is deprecated since PyTorch 2.0. Please use a public API of PyTorch Distributed instead.', FutureWarning, stacklevel=3)\n        return torch.device('cpu')\n    '\\n    ``group._device_types`` is a property pybind that returns the devices\\n    (\"cpu\", \"cuda\", etc) supported by ``group``. Can be multiple if the\\n    ``group`` supports multiple devices.\\n    '\n    devices = group._device_types\n    if len(devices) == 1:\n        return devices[0]\n    elif len(devices) == 0:\n        raise RuntimeError('Default device not found, because no backend has been registered with this ProcessGroup.')\n    else:\n        if torch.device('cpu') in devices:\n            rv = torch.device('cpu')\n        else:\n            rv = devices[0]\n        warnings.warn(f'Multiple backends are registered with this ProcessGroup. We cannot determine which one is the default. Returning {rv}. Please consider using other APIs.')\n        return rv",
    "docstring": ".. note:: This method will be deprecated, it only stays for backward-compatiblity reason. Alternatives: - If you need to find a device for object collectives, please use . - If you need to query the device types supported by group, please use . Return the device type registered with `init_process_group(\"nccl\", ...)torch.device(\"cuda\")`.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:_get_pg_default_device arg:group arguments arg Call Assign BoolOp Call If Call Call Call Return return:yes Call Assign If Compare Call Return return:yes If Compare Call Raise Call If Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "gml",
    "source_code": "@property\ndef gml(self):\n    return capi.to_gml(self.ptr)",
    "docstring": "Return the GML representation of the Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py",
    "ast_data": "FunctionDef name:gml arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "token",
    "source_code": "@token.setter\ndef token(self, token):\n    if token is None:\n        self.auth.token = None\n        self.auth.token_secret = None\n        self.auth.verifier = None\n    elif 'oauth_token' in token:\n        self.auth.token = token['oauth_token']\n        if 'oauth_token_secret' in token:\n            self.auth.token_secret = token['oauth_token_secret']\n        if 'oauth_verifier' in token:\n            self.auth.verifier = token['oauth_verifier']\n    else:\n        message = f'oauth_token is missing: {token!r}'\n        self.handle_error('missing_token', message)",
    "docstring": "This token setter is designed for an easy integration for OAuthClient. Make sure both OAuth1Session and OAuth2Session have token setters.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth1\\client.py",
    "ast_data": "FunctionDef name:token arg:self arg:token arguments arg arg If Compare Assign Assign Assign If Compare Assign If Compare Assign If Compare Assign Assign Call"
  },
  {
    "library": "numpy",
    "name": "check_embedded_msvcr_match_linked",
    "source_code": "def check_embedded_msvcr_match_linked(msver):\n    maj = msvc_runtime_major()\n    if maj:\n        if not maj == int(msver):\n            raise ValueError('Discrepancy between linked msvcr (%d) and the one about to be embedded (%d)' % (int(msver), maj))",
    "docstring": "msver is the ms runtime version used for the MANIFEST.",
    "type": "function",
    "file_path": "numpy\\numpy\\distutils\\mingw32ccompiler.py",
    "ast_data": "FunctionDef name:check_embedded_msvcr_match_linked arg:msver arguments arg Assign Call If If Compare Call Raise Call Call"
  },
  {
    "library": "pytorch",
    "name": "_get_grad_fn_or_grad_acc",
    "source_code": "def _get_grad_fn_or_grad_acc(t: torch.Tensor) -> Union[Node, None]:\n    if t.requires_grad and t.grad_fn is None:\n        viewed_t = t.view_as(t)\n        grad_fn = viewed_t.grad_fn\n        if grad_fn is not None:\n            return grad_fn.next_functions[0][0]\n        else:\n            raise RuntimeError('Attempted to get grad_fn, but got None.Is this being created in a no-grad context?')\n    else:\n        return t.grad_fn",
    "docstring": "Get the grad function or grad accumulator for a tensor. Accumulate grad nodes are lazily created, so we need to a dummy view in order to trigger its creation.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_backward.py",
    "ast_data": "FunctionDef name:_get_grad_fn_or_grad_acc arg:t arguments arg If BoolOp Compare Assign Call Assign If Compare Return return:yes Raise Call Return return:yes"
  },
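A small demonstration of the lazy creation the docstring mentions: a leaf tensor exposes no grad_fn until a no-op view forces the AccumulateGrad node into existence.

import torch

t = torch.randn(3, requires_grad=True)
print(t.grad_fn)  # None: leaf tensors have no grad_fn

acc = t.view_as(t).grad_fn.next_functions[0][0]
print(type(acc).__name__)  # AccumulateGrad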
  {
    "library": "pandas",
    "name": "_values",
    "source_code": "@property\ndef _values(self) -> ExtensionArray | np.ndarray:\n    return self._data",
    "docstring": "The best array representation. This is an ndarray or ExtensionArray. ``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:_values arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_Assert3DImage",
    "source_code": "def _Assert3DImage(image):\n    return control_flow_ops.with_dependencies(_Check3DImage(image, require_static=False), image)",
    "docstring": "Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if is not a 3-vector. Returns: If the shape of could be verified statically, is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py",
    "ast_data": "FunctionDef name:_Assert3DImage arg:image arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "_check_extra",
    "source_code": "def _check_extra(self, obj):\n    if not isinstance(obj.extra, int):\n        return must_be('an integer', option='extra', obj=obj, id='admin.E203')\n    else:\n        return []",
    "docstring": "Check that extra is an integer.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_extra arg:self arg:obj arguments arg arg If Call Return return:yes Call Return return:no"
  },
  {
    "library": "pytorch",
    "name": "install_generation_tagging_init",
    "source_code": "def install_generation_tagging_init() -> None:\n    if getattr(Module, '___needs_generation_tag_patch', True):\n        init = Module.__init__\n\n        def patched_init(self: Module, *args: Any, **kwargs: Any) -> None:\n            init(self, *args, **kwargs)\n            GenerationTracker.tag(self)\n        Module.__init__ = patched_init\n        setstate = Module.__setstate__\n\n        def patched_setstate(self: Module, state: Any) -> None:\n            setstate(self, state)\n            GenerationTracker.tag(self)\n        Module.__setstate__ = patched_setstate\n        Module.___needs_generation_tag_patch = False\n    GenerationTracker.generation += 1",
    "docstring": "Monkey patch torch.nn.Module.__init__ and torch.nn.Module.__setstate__ so we can detect nn.Module instances created dynamically inside forward methods.",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\mutation_guard.py",
    "ast_data": "FunctionDef name:install_generation_tagging_init arguments If Call Assign FunctionDef name:patched_init arg:self arguments arg arg arg Call Call Assign Assign FunctionDef name:patched_setstate arg:self arg:state arguments arg arg Call Call Assign Assign"
  },
  {
    "library": "matplotlib",
    "name": "make_axes_area_auto_adjustable",
    "source_code": "def make_axes_area_auto_adjustable(ax, use_axes=None, pad=0.1, adjust_dirs=None):\n    if adjust_dirs is None:\n        adjust_dirs = ['left', 'right', 'bottom', 'top']\n    divider = make_axes_locatable(ax)\n    if use_axes is None:\n        use_axes = ax\n    divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad, adjust_dirs=adjust_dirs)",
    "docstring": "Add auto-adjustable padding around *ax* to take its decorations (title, labels, ticks, ticklabels) into account during layout, using . By default, padding is determined from the decorations of *ax*. Pass *use_axes* to consider the decorations of other Axes instead.",
    "type": "function",
    "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py",
    "ast_data": "FunctionDef name:make_axes_area_auto_adjustable arg:ax arg:use_axes arg:pad arg:adjust_dirs arguments arg arg arg arg If Compare Assign Assign Call If Compare Assign Call"
  },
  {
    "library": "sphinx",
    "name": "Rubric",
    "source_code": "class Rubric(SphinxDirective):\n    required_arguments = 1\n    optional_arguments = 0\n    final_argument_whitespace = True\n    option_spec = {'class': directives.class_option, 'name': directives.unchanged, 'heading-level': lambda c: directives.choice(c, ('1', '2', '3', '4', '5', '6'))}\n\n    def run(self) -> list[nodes.rubric | nodes.system_message]:\n        set_classes(self.options)\n        rubric_text = self.arguments[0]\n        textnodes, messages = self.parse_inline(rubric_text, lineno=self.lineno)\n        if 'heading-level' in self.options:\n            self.options['heading-level'] = int(self.options['heading-level'])\n        rubric = nodes.rubric(rubric_text, '', *textnodes, **self.options)\n        self.add_name(rubric)\n        return [rubric, *messages]",
    "docstring": "A patch of the docutils' :rst:dir: directive, which adds a level option to specify the heading level of the rubric.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\directives\\patches.py",
    "ast_data": "ClassDef name:Rubric Assign Assign Assign Assign arguments arg Call FunctionDef name:run arg:self arguments arg Call Assign Assign Call If Compare Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "PipeInfo",
    "source_code": "@dataclass\nclass PipeInfo:\n    graph: fx.Graph\n    num_stages: int\n    has_loss_and_backward: bool",
    "docstring": "Captures information for a pipeline ( object).",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py",
    "ast_data": "ClassDef name:PipeInfo"
  },
  {
    "library": "tensorflow",
    "name": "keras_inputs",
    "source_code": "@property\ndef keras_inputs(self):\n    return self._keras_inputs",
    "docstring": "Tensors input to this node that can be traced back to a .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py",
    "ast_data": "FunctionDef name:keras_inputs arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "res_arg",
    "source_code": "def res_arg(self, ns, types_ns, f_name, name, type_anno, f_is_local):\n    raise NotImplementedError('subclasses must implement')",
    "docstring": "Resolves the type of a (possibly annotated) function argument. Args: ns: namespace types_ns: types namespace f_name: str, the function name name: str, the argument name type_anno: the type annotating the argument, if any f_is_local: bool, whether the function is a local function Returns: Set of the argument types.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\type_inference.py",
    "ast_data": "FunctionDef name:res_arg arg:self arg:ns arg:types_ns arg:f_name arg:name arg:type_anno arg:f_is_local arguments arg arg arg arg arg arg arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_broadcast_object",
    "source_code": "def _broadcast_object(obj: Any, src_rank: int, group: object=dist.group.WORLD, device: torch.device=torch.device('cpu')) -> Any:\n    if dist.get_rank() == src_rank:\n        buffer = io.BytesIO()\n        torch.save(obj, buffer)\n        data = bytearray(buffer.getbuffer())\n        length_tensor = torch.LongTensor([len(data)]).to(device)\n        data_send_tensor = torch.ByteTensor(data).to(device)\n        dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)\n        dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False)\n    else:\n        length_tensor = torch.LongTensor([0]).to(device)\n        dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)\n        data_recv_tensor = torch.empty([int(length_tensor.item())], dtype=torch.uint8, device=device)\n        dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False)\n        buffer = io.BytesIO(data_recv_tensor.cpu().numpy())\n        obj = torch.load(buffer, map_location=device, weights_only=False)\n    return obj",
    "docstring": "Broadcasts an object to the given group. It will be sending the object if called from the source rank and receiving the object otherwise. Arguments: obj: object to broadcast; only used if called on the source rank. src_rank (int): source rank. group (``). Returns: The broadcasted object.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_broadcast_object arg:obj arg:src_rank arg:group arg:device arguments arg arg arg arg Call If Compare Call Assign Call Call Assign Call Call Assign Call Call Call Assign Call Call Call Call Assign Call Call Call Assign Call Call Call Call Assign Call Call Call Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_useOffset",
    "source_code": "def get_useOffset(self):\n    return self._useOffset",
    "docstring": "Return whether automatic mode for offset notation is active. This returns True if ``. See Also -------- ScalarFormatter.set_useOffset",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:get_useOffset arg:self arguments arg Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_validate",
    "source_code": "def _validate(self) -> None:\n    if len(self._ndarray) and (not lib.is_string_array(self._ndarray, skipna=True)):\n        raise ValueError('StringArray requires a sequence of strings or pandas.NA')\n    if self._ndarray.dtype != 'object':\n        raise ValueError(f\"StringArray requires a sequence of strings or pandas.NA. Got '{self._ndarray.dtype}' dtype instead.\")\n    if self._ndarray.ndim > 2:\n        lib.convert_nans_to_NA(self._ndarray.ravel('K'))\n    else:\n        lib.convert_nans_to_NA(self._ndarray)",
    "docstring": "Validate that we only store NA or strings.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\string_.py",
    "ast_data": "FunctionDef name:_validate arg:self arguments arg If BoolOp Call Call Raise Call If Compare Raise Call If Compare Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_handle_deleter",
    "source_code": "def _get_handle_deleter(graph, deleter_key, handle):\n    result = graph._handle_deleters.get(deleter_key)\n    if result is None:\n        handle_device = TensorHandle._get_device_name(handle)\n        with graph.as_default(), graph.device(handle_device):\n            holder = array_ops.placeholder(dtypes.string)\n            deleter = gen_data_flow_ops.delete_session_tensor(holder)\n        result = (holder, deleter)\n        graph._handle_deleters[deleter_key] = result\n    return result",
    "docstring": "Return a deletion subgraph for this handle.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_handle_deleter arg:graph arg:deleter_key arg:handle arguments arg arg arg Assign Call If Compare Assign Call With Call Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "EmptyQuerySet",
    "source_code": "class EmptyQuerySet(metaclass=InstanceCheckMeta):\n\n    def __init__(self, *args, **kwargs):\n        raise TypeError(\"EmptyQuerySet can't be instantiated\")",
    "docstring": "Marker class to checking if a queryset is empty by .none(): isinstance(qs.none(), EmptyQuerySet) -> True",
    "type": "class",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "ClassDef name:EmptyQuerySet FunctionDef name:__init__ arg:self arguments arg arg arg Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_all_sum_grad",
    "source_code": "@ops.RegisterGradient('NcclAllReduce')\ndef _all_sum_grad(op, grad):\n    if op.get_attr('reduction') != b'sum':\n        raise LookupError('No gradient defined for NcclAllReduce except for reduction=\"sum\".')\n    _check_device(grad, expected=op.device)\n    num_devices = op.get_attr('num_devices')\n    shared_name = op.get_attr('shared_name') + b'_grad'\n    with ops.device(op.device):\n        return gen_nccl_ops.nccl_all_reduce(input=grad, reduction='sum', num_devices=num_devices, shared_name=shared_name)",
    "docstring": "The gradients for . Args: op: The that we are differentiating. grad: Gradient with respect to the output of the op. Returns: The gradient with respect to the output of . Raises: LookupError: If is not .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py",
    "ast_data": "FunctionDef name:_all_sum_grad arg:op arg:grad arguments arg arg If Compare Call Raise Call Call Assign Call Assign Call With Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "rc_file_defaults",
    "source_code": "def rc_file_defaults():\n    with _api.suppress_matplotlib_deprecation_warning():\n        from .style.core import STYLE_BLACKLIST\n        rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig if k not in STYLE_BLACKLIST})",
    "docstring": "Restore the from the original rc file loaded by Matplotlib. Style-blacklisted (defined in ``) are not updated.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:rc_file_defaults arguments With Call Call Compare"
  },
  {
    "library": "pandas",
    "name": "validate_data_columns",
    "source_code": "def validate_data_columns(self, data_columns, min_itemsize, non_index_axes) -> list:\n    if not len(non_index_axes):\n        return []\n    axis, axis_labels = non_index_axes[0]\n    info = self.info.get(axis, {})\n    if info.get('type') == 'MultiIndex' and data_columns:\n        raise ValueError(f'cannot use a multi-index on axis [{axis}] with data_columns {data_columns}')\n    if data_columns is True:\n        data_columns = list(axis_labels)\n    elif data_columns is None:\n        data_columns = []\n    if isinstance(min_itemsize, dict):\n        existing_data_columns = set(data_columns)\n        data_columns = list(data_columns)\n        data_columns.extend([k for k in min_itemsize.keys() if k != 'values' and k not in existing_data_columns])\n    return [c for c in data_columns if c in axis_labels]",
    "docstring": "take the input data_columns and min_itemize and create a data columns spec",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:validate_data_columns arg:self arg:data_columns arg:min_itemsize arg:non_index_axes arguments arg arg arg arg If Call Return return:no Assign Assign Call If BoolOp Compare Call Raise Call If Compare Assign Call If Compare Assign If Call Assign Call Assign Call Call Call BoolOp Compare Compare Return return:yes Compare"
  },
  {
    "library": "tensorflow",
    "name": "parse_readable_size_str",
    "source_code": "def parse_readable_size_str(size_str):\n    size_str = size_str.strip()\n    if size_str.endswith('B'):\n        size_str = size_str[:-1]\n    if size_str.isdigit():\n        return int(size_str)\n    elif size_str.endswith('k'):\n        return int(float(size_str[:-1]) * 1024)\n    elif size_str.endswith('M'):\n        return int(float(size_str[:-1]) * 1048576)\n    elif size_str.endswith('G'):\n        return int(float(size_str[:-1]) * 1073741824)\n    else:\n        raise ValueError('Failed to parsed human-readable byte size str: \"%s\"' % size_str)",
    "docstring": "Convert a human-readable str representation to number of bytes. Only the units \"kB\", \"MB\", \"GB\" are supported. The \"B character at the end of the input may be omitted. Args: size_str: () A human-readable str representing a number of bytes (e.g., \"0\", \"1023\", \"1.1kB\", \"24 MB\", \"23GB\", \"100 G\". Returns: () The parsed number of bytes. Raises: ValueError: on failure to parse the input .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py",
    "ast_data": "FunctionDef name:parse_readable_size_str arg:size_str arguments arg Assign Call If Call Assign If Call Return return:yes Call If Call Return return:yes Call Call If Call Return return:yes Call Call If Call Return return:yes Call Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "local",
    "source_code": "@property\ndef local(self) -> Optional[str]:\n    if self._version.local:\n        return '.'.join((str(x) for x in self._version.local))\n    else:\n        return None",
    "docstring": "The local version segment of the version. >>> print(Version(\"1.2.3\").local) None >>> Version(\"1.2.3+abc\").local 'abc'",
    "type": "method",
    "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py",
    "ast_data": "FunctionDef name:local arg:self arguments arg If Return return:yes Call Call Return return:no"
  },
  {
    "library": "sphinx",
    "name": "_substitute_copyright_year",
    "source_code": "def _substitute_copyright_year(copyright_line: str, current_year: str, replace_year: str) -> str:\n    if len(copyright_line) < 4 or not copyright_line[:4].isdigit():\n        return copyright_line\n    if copyright_line[:4] == current_year and copyright_line[4:5] in {'', ' ', ','}:\n        return replace_year + copyright_line[4:]\n    if copyright_line[4:5] != '-':\n        return copyright_line\n    if copyright_line[5:9].isdigit() and copyright_line[5:9] == current_year and (copyright_line[9:10] in {'', ' ', ','}):\n        return copyright_line[:5] + replace_year + copyright_line[9:]\n    return copyright_line",
    "docstring": "Replace the year in a single copyright line. Legal formats are: * ``.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\config.py",
    "ast_data": "FunctionDef name:_substitute_copyright_year arg:copyright_line arg:current_year arg:replace_year arguments arg arg arg If BoolOp Compare Call Call Return return:yes If BoolOp Compare Compare Return return:yes If Compare Return return:yes If BoolOp Call Compare Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_device_locations",
    "source_code": "def get_device_locations(mesh: layout_lib.Mesh, client_id: Optional[int]=None) -> List[Dict[str, int]]:\n    if mesh.device_type() != _TPU_DEVICE_TYPE:\n        raise ValueError('The mesh must be a TPU mesh')\n    if client_id is None or client_id == config.client_id():\n        return mesh.local_device_locations()\n    raise NotImplementedError(\"Looking up other clients' device locations is not supported\")",
    "docstring": "Returns the device locations of all TPU cores local to the given client. A device location is a dictionary from dimension names to indices on those dimensions. For example, for a 2x2 mesh ('x', 'y'), this function returns a permutation of this list: [{'x': 0, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 0}, {'x': 1, 'y': 1}]. Note that device IDs and device locations are equivalent. The former is a linearization of the latter along mesh dimensions. Args: mesh: A TPU mesh. client_id: Optional; A DTensor client ID. If empty, query this client.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\dtensor\\python\\tpu_util.py",
    "ast_data": "FunctionDef name:get_device_locations arg:mesh arg:client_id arguments arg arg If Compare Call Raise Call If BoolOp Compare Compare Call Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "initialize_system_for_tpu_embedding",
    "source_code": "def initialize_system_for_tpu_embedding(embedding_config: embedding_pb2.TPUEmbeddingConfiguration, job: Optional[Text]=None) -> ops.Operation:\n    config_string = embedding_config.SerializeToString()\n    with ops.device(_tpu_system_device_name(job)):\n        return tpu_ops.configure_tpu_embedding(config=config_string)",
    "docstring": "Initializes a distributed TPU Embedding system for use with TensorFlow. The following two are equivalent: 1. initialize_system() with embedding_config. 2. initialize_system() without embedding_config, then initialize_system_for_tpu_embedding(). initialize_system() should not be called with embedding_config if initialize_system_for_tpu_embedding() is meant to be called later. Args: embedding_config: a proto describing the desired configuration of the hardware embedding lookup tables. job: The job (the XXX in TensorFlow device specification /job:XXX) that contains the TPU devices that will be initialized. If job=None it is assumed there is only one job in the TensorFlow flock, and an error will be returned if this assumption does not hold. Returns: A no-op.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py",
    "ast_data": "FunctionDef name:initialize_system_for_tpu_embedding arg:embedding_config arg:job arguments arg arg Assign Call With Call Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "__len__",
    "source_code": "def __len__(self):\n    product = partial(reduce, operator.mul)\n    return sum((product((len(v) for v in p.values())) if p else 1 for p in self.param_grid))",
    "docstring": "Number of points on the grid.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:__len__ arg:self arguments arg Assign Call Return return:yes Call Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "TensorTracerConfig",
    "source_code": "class TensorTracerConfig(object):\n\n    def __init__(self):\n        self.version = _CURRENT_VERSION\n        self.device_type = None\n        self.num_replicas = None\n        self.num_replicas_per_host = None\n        self.num_hosts = None",
    "docstring": "Tensor Tracer config object.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py",
    "ast_data": "ClassDef name:TensorTracerConfig FunctionDef name:__init__ arg:self arguments arg Assign Assign Assign Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "create_resource",
    "source_code": "def create_resource(self, function, args=None, kwargs=None):\n    closure = ResourceClosure(function, self._cluster.resource_cancellation_mgr, args=args, kwargs=kwargs)\n    return self._register_and_schedule_resource_closure(closure)",
    "docstring": "Asynchronously creates a per-worker resource represented by a . Args: function: the resource function to be run remotely. It should be a , a concrete function or a Python function. args: positional arguments to be passed to the function. kwargs: keyword arguments to be passed to the function. Returns: one or several RemoteValue objects depending on the function return values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py",
    "ast_data": "FunctionDef name:create_resource arg:self arg:function arg:args arg:kwargs arguments arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "check_same_shape",
    "source_code": "def check_same_shape(*args, allow_cpu_scalar_tensors: bool):\n    shape = None\n    for arg in args:\n        if isinstance(arg, Number):\n            continue\n        elif isinstance(arg, TensorLike):\n            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):\n                continue\n            if shape is None:\n                shape = arg.shape\n            if not is_same_shape(shape, arg.shape):\n                msg = f'Shape {arg.shape} is not the expected shape {shape}!'\n                raise RuntimeError(msg)\n        else:\n            msg = 'Unexpected type when checking for same shape, ' + str(type(arg)) + '!'\n            raise RuntimeError(msg)",
    "docstring": "Checks that all Tensors in args have the same shape. Raises a RuntimeError when: - args contains an object whose type is not Tensor or Number - two Tensor objects in args have different devices",
    "type": "function",
    "file_path": "pytorch\\torch\\_prims_common\\__init__.py",
    "ast_data": "FunctionDef name:check_same_shape arguments arg arg Assign For If Call If Call If BoolOp Call If Compare Assign If Call Assign Raise Call Assign Call Call Raise Call"
  },
  {
    "library": "django",
    "name": "logout_then_login",
    "source_code": "def logout_then_login(request, login_url=None):\n    login_url = resolve_url(login_url or settings.LOGIN_URL)\n    return LogoutView.as_view(next_page=login_url)(request)",
    "docstring": "Log out the user if they are logged in. Then redirect to the login page.",
    "type": "function",
    "file_path": "django\\django\\contrib\\auth\\views.py",
    "ast_data": "FunctionDef name:logout_then_login arg:request arg:login_url arguments arg arg Assign Call BoolOp Return return:yes Call Call"
  },
  {
    "library": "pandas",
    "name": "check_below_min_count",
    "source_code": "def check_below_min_count(shape: tuple[int, ...], mask: npt.NDArray[np.bool_] | None, min_count: int) -> bool:\n    if min_count > 0:\n        if mask is None:\n            non_nulls = np.prod(shape)\n        else:\n            non_nulls = mask.size - mask.sum()\n        if non_nulls < min_count:\n            return True\n    return False",
    "docstring": "Check for the keyword. Returns True if below (when missing value should be returned from the reduction). Parameters ---------- shape : tuple The shape of the values (). mask : ndarray[bool] or None Boolean numpy array (typically of same shape as ) or None. min_count : int Keyword passed through from sum/prod call. Returns ------- bool",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\nanops.py",
    "ast_data": "FunctionDef name:check_below_min_count arg:shape arg:mask arg:min_count arguments arg arg arg If Compare If Compare Assign Call Assign Call If Compare Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "num_gpus",
    "source_code": "def num_gpus(self):\n    self.ensure_initialized()\n    return self._num_gpus",
    "docstring": "The number of GPUs available to execute operations.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:num_gpus arg:self arguments arg Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_sparse_tensors",
    "source_code": "def get_sparse_tensors(self, transformation_cache, state_manager):\n    return CategoricalColumn.IdWeightPair(transformation_cache.get(self, state_manager), None)",
    "docstring": "See base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:get_sparse_tensors arg:self arg:transformation_cache arg:state_manager arguments arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_is_eager",
    "source_code": "def _is_eager(self):\n    rt = self\n    while isinstance(rt, RaggedTensor):\n        if not isinstance(rt.row_splits, ops.EagerTensor):\n            return False\n        rt = rt.values\n    return isinstance(rt, ops.EagerTensor)",
    "docstring": "Returns True if values & row_splits Tensors are all s.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py",
    "ast_data": "FunctionDef name:_is_eager arg:self arguments arg Assign While Call If Call Return return:yes Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "calibrate_and_quantize_single",
    "source_code": "@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER)\ndef calibrate_and_quantize_single(self, dataset_gen, input_type, output_type, allow_float, op_output_name, resize_input=True):\n    self._feed_tensors(dataset_gen, resize_input)\n    return self._calibrator.QuantizeModel(np.dtype(input_type.as_numpy_dtype()).num, np.dtype(output_type.as_numpy_dtype()).num, allow_float, op_output_name)",
    "docstring": "Calibrates the model with specified generator and then quantizes it. Only the single op with output op_output_name will be quantized. The input shapes of the calibrator are resized with the calibration data. Returns: A quantized model. Args: dataset_gen: A generator that generates calibration samples. input_type: A tf.dtype representing the desired real-value input type. output_type: A tf.dtype representing the desired real-value output type. allow_float: A boolean. False if the resulting model cannot perform float computation, useful when targeting an integer-only backend. If False, an error will be thrown if an operation cannot be quantized, otherwise the model will fallback to float ops. op_output_name: A string, only this op will be quantized. resize_input: A boolean. True if the shape of the sample data is different from the input.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\lite\\python\\optimize\\calibrator.py",
    "ast_data": "FunctionDef name:calibrate_and_quantize_single arg:self arg:dataset_gen arg:input_type arg:output_type arg:allow_float arg:op_output_name arg:resize_input arguments arg arg arg arg arg arg arg Call Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "strip_overloads",
    "source_code": "def strip_overloads(gm):\n    for node in gm.graph.nodes:\n        if isinstance(node.target, torch._ops.OpOverload):\n            node.target = node.target.overloadpacket\n    gm.recompile()",
    "docstring": "Modifies the target of graph nodes in :attr: to strip overloads. Args: gm(fx.GraphModule): The input Fx graph module to be modified",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\dynamo\\microbenchmarks\\operatorbench.py",
    "ast_data": "FunctionDef name:strip_overloads arg:gm arguments arg For If Call Assign Call"
  },
  {
    "library": "django",
    "name": "valid",
    "source_code": "@property\ndef valid(self):\n    return capi.geos_isvalid(self.ptr)",
    "docstring": "Test the validity of this Geometry.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py",
    "ast_data": "FunctionDef name:valid arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_internal_operation_seed",
    "source_code": "def _internal_operation_seed(self):\n    return self._rng.randint(0, _MAXINT32)",
    "docstring": "Returns a fake operation seed. In eager mode, user shouldn't set or depend on operation seed. Here, we generate a random seed based on global seed to make operation's randomness different and depend on the global seed. Returns: A fake operation seed based on global seed.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_internal_operation_seed arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "_add_points2d_as_flat_tensors_to_num_ray_dict",
    "source_code": "@staticmethod\ndef _add_points2d_as_flat_tensors_to_num_ray_dict(n: int, x: Tensor, y: Tensor, camera_id: int, points2d_as_flat_tensors: Dict[int, Points2D_FlatTensors]) -> None:\n    if n not in points2d_as_flat_tensors:\n        points2d_as_flat_tensors[n] = RaySampler.Points2D_FlatTensors()\n        points2d_as_flat_tensors[n]._x = x.flatten()\n        points2d_as_flat_tensors[n]._y = y.flatten()\n    else:\n        points2d_as_flat_tensors[n]._x = torch.cat((points2d_as_flat_tensors[n]._x, x.flatten()))\n        points2d_as_flat_tensors[n]._y = torch.cat((points2d_as_flat_tensors[n]._y, y.flatten()))\n    points2d_as_flat_tensors[n]._camera_ids.append(camera_id)",
    "docstring": "Add x/y pixel coordinates for all rays casted by a scene camera to dictionary of pixel coordinates grouped by total number of rays.",
    "type": "method",
    "file_path": "kornia\\kornia\\nerf\\samplers.py",
    "ast_data": "FunctionDef name:_add_points2d_as_flat_tensors_to_num_ray_dict arg:n arg:x arg:y arg:camera_id arg:points2d_as_flat_tensors arguments arg arg arg arg arg If Compare Assign Call Assign Call Assign Call Assign Call Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "quantile_with_mask",
    "source_code": "def quantile_with_mask(values: np.ndarray, mask: npt.NDArray[np.bool_], fill_value, qs: npt.NDArray[np.float64], interpolation: str) -> np.ndarray:\n    assert values.shape == mask.shape\n    if values.ndim == 1:\n        values = np.atleast_2d(values)\n        mask = np.atleast_2d(mask)\n        res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)\n        return res_values[0]\n    assert values.ndim == 2\n    is_empty = values.shape[1] == 0\n    if is_empty:\n        flat = np.full(len(qs), fill_value)\n        result = np.repeat(flat, len(values)).reshape(len(values), len(qs))\n    else:\n        result = _nanquantile(values, qs, na_value=fill_value, mask=mask, interpolation=interpolation)\n        result = np.asarray(result)\n        result = result.T\n    return result",
    "docstring": "Compute the quantiles of the given values for each quantile in . Parameters ---------- values : np.ndarray For ExtensionArray, this is _values_for_factorize()[0] mask : np.ndarray[bool] mask = isna(values) For ExtensionArray, this is computed before calling _value_for_factorize fill_value : Scalar The value to interpret fill NA entries with For ExtensionArray, this is _values_for_factorize()[1] qs : np.ndarray[float64] interpolation : str Type of interpolation Returns ------- np.ndarray Notes ----- Assumes values is already 2D. For ExtensionArray this means np.atleast_2d has been called on _values_for_factorize()[0] Quantile is computed along axis=1.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\array_algos\\quantile.py",
    "ast_data": "FunctionDef name:quantile_with_mask arg:values arg:mask arg:fill_value arg:qs arg:interpolation arguments arg arg arg arg arg Compare If Compare Assign Call Assign Call Assign Call Return return:yes Compare Assign Compare If Assign Call Call Assign Call Call Call Call Call Assign Call Assign Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_arch_list",
    "source_code": "def get_arch_list() -> list[str]:\n    if not _is_compiled():\n        return []\n    arch_flags = torch._C._xpu_getArchFlags()\n    if arch_flags is None:\n        return []\n    return arch_flags.split()",
    "docstring": "Return list XPU architectures this library was compiled for.",
    "type": "function",
    "file_path": "pytorch\\torch\\xpu\\__init__.py",
    "ast_data": "FunctionDef name:get_arch_list arguments If Call Return return:no Assign Call If Compare Return return:no Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_is_decade",
    "source_code": "def _is_decade(x, *, base=10, rtol=None):\n    if not np.isfinite(x):\n        return False\n    if x == 0.0:\n        return True\n    lx = np.log(abs(x)) / np.log(base)\n    if rtol is None:\n        return np.isclose(lx, np.round(lx))\n    else:\n        return np.isclose(lx, np.round(lx), rtol=rtol)",
    "docstring": "Return True if *x* is an integer power of *base*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:_is_decade arg:x arguments arg arg arg If Call Return return:yes If Compare Return return:yes Assign Call Call Call If Compare Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "GetBlasLapackFuncs",
    "source_code": "class GetBlasLapackFuncs(Benchmark):\n    param_names = ['dtype1', 'dtype2', 'dtype1_ord', 'dtype2_ord', 'size']\n    params = [['b', 'G', 'd'], ['d', 'F', '?'], ['C', 'F'], ['C', 'F'], [10, 100, 1000]]\n\n    def setup(self, dtype1, dtype2, dtype1_ord, dtype2_ord, size):\n        self.arr1 = np.empty(size, dtype=dtype1, order=dtype1_ord)\n        self.arr2 = np.empty(size, dtype=dtype2, order=dtype2_ord)\n\n    def time_find_best_blas_type(self, dtype1, dtype2, dtype1_ord, dtype2_ord, size):\n        prefix, dtype, prefer_fortran = bla.find_best_blas_type((self.arr1, self.arr2))",
    "docstring": "Test the speed of grabbing the correct BLAS/LAPACK routine flavor. In particular, upon receiving strange dtype arrays the results shouldn't diverge too much. Hence the results here should be comparable",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\blas_lapack.py",
    "ast_data": "ClassDef name:GetBlasLapackFuncs Assign Assign FunctionDef name:setup arg:self arg:dtype1 arg:dtype2 arg:dtype1_ord arg:dtype2_ord arg:size arguments arg arg arg arg arg arg Assign Call Assign Call FunctionDef name:time_find_best_blas_type arg:self arg:dtype1 arg:dtype2 arg:dtype1_ord arg:dtype2_ord arg:size arguments arg arg arg arg arg arg Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "from_key_counter",
    "source_code": "@classmethod\ndef from_key_counter(cls, key, counter, alg):\n    counter = _convert_to_state_tensor(counter)\n    key = _convert_to_state_tensor(key)\n    alg = random_ops_util.convert_alg_to_int(alg)\n    counter.shape.assert_is_compatible_with([_get_state_size(alg) - 1])\n    key.shape.assert_is_compatible_with([])\n    key = array_ops.reshape(key, [1])\n    state = array_ops.concat([counter, key], 0)\n    return cls(state=state, alg=alg)",
    "docstring": "Creates a generator from a key and a counter. This constructor only applies if the algorithm is a counter-based algorithm. See method for the meaning of \"key\" and \"counter\". Args: key: the key for the RNG, a scalar of type STATE_TYPE. counter: a vector of dtype STATE_TYPE representing the initial counter for the RNG, whose length is algorithm-specific., alg: the RNG algorithm. If None, it will be auto-selected. See for its possible values. Returns: The new generator.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:from_key_counter arg:cls arg:key arg:counter arg:alg arguments arg arg arg arg Assign Call Assign Call Assign Call Call Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "reverse_sequence_v2",
    "source_code": "@tf_export('reverse_sequence', v1=[])\n@dispatch.add_dispatch_support\ndef reverse_sequence_v2(input, seq_lengths, seq_axis=None, batch_axis=None, name=None):\n    return gen_array_ops.reverse_sequence(input=input, seq_lengths=seq_lengths, seq_dim=seq_axis, batch_dim=batch_axis, name=name)",
    "docstring": "Reverses variable length slices. This op first slices along the dimension , and for each slice , reverses the first elements along the dimension . The elements of must obey TensorTensorint32int64input.dims(batch_axis)max(seq_lengths) <= input.dims(seq_axis)intint0`. The dimension along which reversal is performed. name: A name for the operation (optional). Returns: A Tensor. Has the same type as input.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:reverse_sequence_v2 arg:input arg:seq_lengths arg:seq_axis arg:batch_axis arg:name arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "get_length_by_instance",
    "source_code": "@abstractmethod\ndef get_length_by_instance(self, instance_id: int):\n    pass",
    "docstring": "Raise TypeError if it's not supposed to be implemented to support .",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py",
    "ast_data": "FunctionDef name:get_length_by_instance arg:self arg:instance_id arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "device_name",
    "source_code": "@property\ndef device_name(self):\n    return self._device_name",
    "docstring": "Name of the device that the tensor belongs to. Returns: () device name.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py",
    "ast_data": "FunctionDef name:device_name arg:self arguments arg Return return:yes"
  },
  {
    "library": "django",
    "name": "_has_filters",
    "source_code": "def _has_filters(self):\n    return self.query.has_filters()",
    "docstring": "Check if this QuerySet has any filtering going on. This isn't equivalent with checking if all objects are present in results, for example, qs[1:]._has_filters() -> False.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:_has_filters arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "apply",
    "source_code": "def apply(self, func: AggFuncType, args: tuple[Any, ...]=(), *, by_row: Literal[False, 'compat']='compat', **kwargs) -> DataFrame | Series:\n    return SeriesApply(self, func, by_row=by_row, args=args, kwargs=kwargs).apply()",
    "docstring": "Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. args : tuple Positional arguments passed to func after the series value. by_row : False or \"compat\", default \"compat\" If `gotchas.udf-mutation`. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:apply arg:self arg:func arg:args arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "CopyIfCallgrind",
    "source_code": "class CopyIfCallgrind:\n\n    def __init__(self, value: Any, *, setup: Optional[str]=None):\n        for method, supported_types in _GLOBALS_ALLOWED_TYPES.items():\n            if any((isinstance(value, t) for t in supported_types)):\n                self._value: Any = value\n                self._setup: Optional[str] = setup\n                self._serialization: Serialization = method\n                break\n        else:\n            supported_str = '\\n'.join([getattr(t, '__name__', repr(t)) for t in it.chain(_GLOBALS_ALLOWED_TYPES.values())])\n            raise ValueError(f'Unsupported type: {type(value)}\\n`collect_callgrind` restricts globals to the following types:\\n{textwrap.indent(supported_str, '  ')}')\n\n    @property\n    def value(self) -> Any:\n        return self._value\n\n    @property\n    def setup(self) -> Optional[str]:\n        return self._setup\n\n    @property\n    def serialization(self) -> Serialization:\n        return self._serialization\n\n    @staticmethod\n    def unwrap_all(globals: dict[str, Any]) -> dict[str, Any]:\n        return {k: v.value if isinstance(v, CopyIfCallgrind) else v for k, v in globals.items()}",
    "docstring": "Signal that a global may be replaced with a deserialized copy. See for why this matters.",
    "type": "class",
    "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py",
    "ast_data": "ClassDef name:CopyIfCallgrind FunctionDef name:__init__ arg:self arg:value arguments arg arg arg For Call If Call Call Assign Call Call Call Call Call Raise Call Call Call FunctionDef name:value arg:self arguments arg Return return:yes FunctionDef name:setup arg:self arguments arg Return return:yes FunctionDef name:serialization arg:self arguments arg Return return:yes FunctionDef name:unwrap_all arg:globals arguments arg Return return:yes Call Call"
  },
  {
    "library": "django",
    "name": "Migration",
    "source_code": "@classproperty\ndef Migration(cls):\n    if cls._migration_class is None:\n\n        class Migration(models.Model):\n            app = models.CharField(max_length=255)\n            name = models.CharField(max_length=255)\n            applied = models.DateTimeField(default=now)\n\n            class Meta:\n                apps = Apps()\n                app_label = 'migrations'\n                db_table = 'django_migrations'\n\n            def __str__(self):\n                return 'Migration %s for %s' % (self.name, self.app)\n        cls._migration_class = Migration\n    return cls._migration_class",
    "docstring": "Lazy load to avoid AppRegistryNotReady if installed apps import MigrationRecorder.",
    "type": "method",
    "file_path": "django\\django\\db\\migrations\\recorder.py",
    "ast_data": "FunctionDef name:Migration arg:cls arguments arg If Compare ClassDef name:Migration Assign Call Assign Call Assign Call ClassDef name:Meta Assign Call Assign Assign FunctionDef name:__str__ arg:self arguments arg Return return:yes Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "Ursem03",
    "source_code": "class Ursem03(Benchmark):\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(-2, 2), (-1.5, 1.5)]\n        self.global_optimum = [[0.0 for _ in range(self.N)]]\n        self.fglob = -3.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        u = -(sin(2.2 * pi * x[0] + 0.5 * pi) * ((2.0 - abs(x[0])) / 2.0) * ((3.0 - abs(x[0])) / 2))\n        v = -(sin(2.2 * pi * x[1] + 0.5 * pi) * ((2.0 - abs(x[1])) / 2) * ((3.0 - abs(x[1])) / 2))\n        return u + v",
    "docstring": "Ursem 3 objective function. This class defines the Ursem 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ursem03}}(x) = - \\sin(2.2 \\pi x_1 + 0.5 \\pi) \\frac{2 - \\lvert x_1 \\rvert}{2} \\frac{3 - \\lvert x_1 \\rvert}{2} - \\sin(2.2 \\pi x_2 + 0.5 \\pi) \\frac{2 - \\lvert x_2 \\rvert}{2} \\frac{3 - \\lvert x_2 \\rvert}{2} with :math:, :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO Gavana and Jamil #157 disagree on the formulae here. Jamil squares the x[1] term in the sine expression. Gavana doesn't. Go with Gavana here.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_U.py",
    "ast_data": "ClassDef name:Ursem03 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_mutation_lock",
    "source_code": "def _mutation_lock(self) -> lock_util.GroupLock._Context:\n    return self._group_lock.group(_MUTATION_LOCK_GROUP)",
    "docstring": "Returns a lock to guard code that creates & mutates ops. See the comment for self._group_lock for more info.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:_mutation_lock arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "coef_C",
    "source_code": "class coef_C(sympy.Function):\n    nargs = 3\n\n    @classmethod\n    def eval(cls, m, rho, beta):\n        if not m >= 0:\n            raise ValueError('must have m >= 0')\n        v = symbols('v')\n        expression = (1 - v) ** (-beta) * g(2 * m, rho, v) ** (-m - Rational(1, 2))\n        res = expression.diff(v, 2 * m).subs(v, 0) / factorial(2 * m)\n        res = res * (gamma(m + Rational(1, 2)) / (2 * pi) * (2 / (rho + 1)) ** (m + Rational(1, 2)))\n        return res",
    "docstring": "Calculate coefficients C_m for integer m. C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b) * g(rho, v)^(-m-1/2)",
    "type": "class",
    "file_path": "scipy\\scipy\\special\\_precompute\\wright_bessel.py",
    "ast_data": "ClassDef name:coef_C Assign FunctionDef name:eval arg:cls arg:m arg:rho arg:beta arguments arg arg arg arg If Compare Raise Call Assign Call Assign Call Call Assign Call Call Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "swap_XW",
    "source_code": "@staticmethod\ndef swap_XW(op: 'cutlass_library.gemm_op.GemmOperation') -> 'cutlass_library.gemm_op.GemmOperation':\n    new_op = copy.deepcopy(op)\n    new_op.A.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.A.layout)\n    new_op.B.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.B.layout)\n    new_op.A, new_op.B = (new_op.B, new_op.A)\n    new_op.C.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.C.layout)\n    new_op.D.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.D.layout)\n    return new_op",
    "docstring": "Swap operands X and W (aka operans A and B) of the GEMM operation. This requires transposing the operands, which is done by swapping the strides. Note that we don't change the apparent external layout, just the operand layout. this is intentional.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:swap_XW arg:op arguments arg Assign Call Assign Call Assign Call Assign Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "create_empty_output_dir",
    "source_code": "def create_empty_output_dir(output_directory: str, overwrite: bool=True) -> None:\n    if overwrite and file_io.file_exists_v2(output_directory):\n        logging.info('Deleting existing output directory: %s .', output_directory)\n        file_io.delete_recursively_v2(output_directory)\n    file_io.recursive_create_dir_v2(output_directory)",
    "docstring": "Creates the . If already exists, it recursively deletes all contents inside the directory. Also creates the parent & intermediate directories. Args: output_directory: Output directory. overwrite: Where to clean the output directory if exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\save_model.py",
    "ast_data": "FunctionDef name:create_empty_output_dir arg:output_directory arg:overwrite arguments arg arg If BoolOp Call Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_get_path_in_displaycoord",
    "source_code": "def _get_path_in_displaycoord(self):\n    dpi_cor = self._dpi_cor\n    posA = self._get_xy(self.xy1, self.coords1, self.axesA)\n    posB = self._get_xy(self.xy2, self.coords2, self.axesB)\n    path = self.get_connectionstyle()(posA, posB, patchA=self.patchA, patchB=self.patchB, shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor)\n    path, fillable = self.get_arrowstyle()(path, self.get_mutation_scale() * dpi_cor, self.get_linewidth() * dpi_cor, self.get_mutation_aspect())\n    return (path, fillable)",
    "docstring": "Return the mutated path of the arrow in display coordinates.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:_get_path_in_displaycoord arg:self arguments arg Assign Assign Call Assign Call Assign Call Call Assign Call Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "build_graph",
    "source_code": "def build_graph(device, input_shape, variable, num_inputs, axis, grad):\n    with ops.device('/%s:0' % device):\n        if not variable:\n            inputs = [array_ops.zeros(input_shape) for _ in range(num_inputs)]\n        elif axis == 1:\n            inputs = [array_ops.zeros([input_shape[0], random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5)]) for _ in range(num_inputs)]\n        else:\n            inputs = [array_ops.zeros([random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5), input_shape[1]]) for _ in range(num_inputs)]\n        outputs = [array_ops.concat(inputs, axis) for _ in range(100)]\n        if grad:\n            return control_flow_ops.group(*list(itertools.chain.from_iterable([gradients_impl.gradients(output, inputs) for output in outputs])))\n        else:\n            return control_flow_ops.group(*outputs)",
    "docstring": "Build a graph containing a sequence of concat operations. Args: device: string, the device to run on. input_shape: shape of the input tensors. variable: whether or not to randomize the input shape num_inputs: the number of inputs to concat axis: axis to be concat'ed grad: if True compute the gradient Returns: An array of tensors to run()",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\concat_benchmark.py",
    "ast_data": "FunctionDef name:build_graph arg:device arg:input_shape arg:variable arg:num_inputs arg:axis arg:grad arguments arg arg arg arg arg arg With Call If Assign Call Call If Compare Assign Call Call Call Call Assign Call Call Call Call Assign Call Call If Return return:yes Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "assert_existing_objects_matched",
    "source_code": "def assert_existing_objects_matched(self):\n    return self.assert_consumed()",
    "docstring": "Raises an exception if currently created objects are unmatched.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:assert_existing_objects_matched arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "set_swap_module_params_on_conversion",
    "source_code": "def set_swap_module_params_on_conversion(value: bool) -> None:\n    global _swap_module_params_on_conversion\n    _swap_module_params_on_conversion = value",
    "docstring": "Sets whether to use :func: instead of setting `~torch.__future__.get_overwrite_module_params_on_conversionnn.Module.cuda()nn.Module.float()nn.Module.tonn.Module.to_emptynn.Module.load_state_dict~nn.Module.load_state_dict~torch.Tensor.module_load~nn.Parameter~torch.utils.swap_tensors~torch.utils.swap_tensors` or not.",
    "type": "function",
    "file_path": "pytorch\\torch\\__future__.py",
    "ast_data": "FunctionDef name:set_swap_module_params_on_conversion arg:value arguments arg Assign"
  },
  {
    "library": "pandas",
    "name": "full_scope",
    "source_code": "@property\ndef full_scope(self) -> DeepChainMap:\n    maps = [self.temps] + self.resolvers.maps + self.scope.maps\n    return DeepChainMap(*maps)",
    "docstring": "Return the full scope for use with passing to engines transparently as a mapping. Returns ------- vars : DeepChainMap All variables in this scope.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:full_scope arg:self arguments arg Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_pg_count",
    "source_code": "def get_pg_count() -> int:\n    return _world.group_count",
    "docstring": "Return the number of process groups.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:get_pg_count arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "delete_recursively",
    "source_code": "@tf_export(v1=['gfile.DeleteRecursively'])\ndef delete_recursively(dirname):\n    delete_recursively_v2(dirname)",
    "docstring": "Deletes everything under dirname recursively. Args: dirname: string, a path to a directory Raises: errors.OpError: If the operation fails.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py",
    "ast_data": "FunctionDef name:delete_recursively arg:dirname arguments arg Call Call"
  },
  {
    "library": "pandas",
    "name": "remove_na_arraylike",
    "source_code": "def remove_na_arraylike(arr: Series | Index | np.ndarray):\n    if isinstance(arr.dtype, ExtensionDtype):\n        return arr[notna(arr)]\n    else:\n        return arr[notna(np.asarray(arr))]",
    "docstring": "Return array-like containing only true/non-NaN values, possibly empty.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\dtypes\\missing.py",
    "ast_data": "FunctionDef name:remove_na_arraylike arg:arr arguments arg If Call Return return:yes Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "cpu_count",
    "source_code": "def cpu_count(only_physical_cores=False):\n    os_cpu_count = os.cpu_count() or 1\n    if sys.platform == 'win32':\n        os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)\n    cpu_count_user = _cpu_count_user(os_cpu_count)\n    aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)\n    if not only_physical_cores:\n        return aggregate_cpu_count\n    if cpu_count_user < os_cpu_count:\n        return max(cpu_count_user, 1)\n    cpu_count_physical, exception = _count_physical_cores()\n    if cpu_count_physical != 'not found':\n        return cpu_count_physical\n    if exception is not None:\n        warnings.warn(f'Could not find the number of physical cores for the following reason:\\n{exception}\\nReturning the number of logical cores instead. You can silence this warning by setting LOKY_MAX_CPU_COUNT to the number of cores you want to use.', stacklevel=2)\n        traceback.print_tb(exception.__traceback__)\n    return aggregate_cpu_count",
    "docstring": "Return the number of CPUs the current process can use. The returned number of CPUs accounts for: * the number of CPUs in the system, as given by `` is True, return the number of physical cores instead of the number of logical cores (hyperthreading / SMT). Note that this option is not enforced if the number of usable cores is controlled in any other way such as: process affinity, Cgroup restricted CPU bandwidth or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical cores is not found, return the number of logical cores. Note that on Windows, the returned number of CPUs cannot exceed 61, see: It is also always larger or equal to 1.",
    "type": "function",
    "file_path": "scipy\\.spin\\cmds.py",
    "ast_data": "FunctionDef name:cpu_count arg:only_physical_cores arguments arg Assign BoolOp Call If Compare Assign Call Assign Call Assign Call Call If Return return:yes If Compare Return return:yes Call Assign Call If Compare Return return:yes If Compare Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "get_model_state_dict",
    "source_code": "def get_model_state_dict(model: nn.Module, *, submodules: Optional[set[nn.Module]]=None, options: Optional[StateDictOptions]=None) -> dict[str, ValueType]:\n    with _gc_context():\n        info = _verify_options(model, (), optim_only=False, submodules=submodules, options=options)\n        model_state_dict = _get_model_state_dict(model, info)\n        _verify_state_dict(model_state_dict, {}, info)\n        return model_state_dict",
    "docstring": "Return the model state_dict of `StateDictOptions`. :rtype: typing.Dict[str, ValueType]",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict.py",
    "ast_data": "FunctionDef name:get_model_state_dict arg:model arguments arg arg arg With Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "multi_rv_frozen",
    "source_code": "class multi_rv_frozen:\n\n    @property\n    def random_state(self):\n        return self._dist._random_state\n\n    @random_state.setter\n    def random_state(self, seed):\n        self._dist._random_state = check_random_state(seed)",
    "docstring": "Class which encapsulates common functionality between all frozen multivariate distributions.",
    "type": "class",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "ClassDef name:multi_rv_frozen FunctionDef name:random_state arg:self arguments arg Return return:yes FunctionDef name:random_state arg:self arg:seed arguments arg arg Assign Call"
  },
  {
    "library": "pytorch",
    "name": "__deepcopy__",
    "source_code": "def __deepcopy__(self, memo=None) -> 'Graph':\n    memo = memo if memo else {}\n    g = Graph(tracer_cls=self._tracer_cls)\n    output_vals = g.graph_copy(self, val_map=memo, return_output_node=True)\n    g._codegen = copy.deepcopy(self._codegen)\n    if output_vals is not None:\n        assert isinstance(output_vals, tuple)\n        output_val, old_output_node = output_vals\n        new_output_node = g.output(output_val, type_expr=getattr(old_output_node, 'type', None))\n        new_output_node.meta = copy.copy(old_output_node.meta)\n    return g",
    "docstring": "Explicitly implement __deepcopy__ to prevent excessive recursion depth from the default implementation. This uses graph_copy to copy the nodes in an iterative way, rather than recursive. It also populates the memoization table to prevent unnecessary copies (e.g. references to nodes or other parts of the Graph from a custom GraphModule implementation.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph.py",
    "ast_data": "FunctionDef name:__deepcopy__ arg:self arg:memo arguments arg arg Assign Assign Call Assign Call Assign Call If Compare Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "_create_method",
    "source_code": "@classmethod\ndef _create_method(cls, op, coerce_to_dtype: bool=True, result_dtype=None):\n\n    def _binop(self, other):\n\n        def convert_values(param):\n            if isinstance(param, ExtensionArray) or is_list_like(param):\n                ovalues = param\n            else:\n                ovalues = [param] * len(self)\n            return ovalues\n        if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)):\n            return NotImplemented\n        lvalues = self\n        rvalues = convert_values(other)\n        res = [op(a, b) for a, b in zip(lvalues, rvalues)]\n\n        def _maybe_convert(arr):\n            if coerce_to_dtype:\n                res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False)\n                if not isinstance(res, type(self)):\n                    res = np.asarray(arr)\n            else:\n                res = np.asarray(arr, dtype=result_dtype)\n            return res\n        if op.__name__ in {'divmod', 'rdivmod'}:\n            a, b = zip(*res)\n            return (_maybe_convert(a), _maybe_convert(b))\n        return _maybe_convert(res)\n    op_name = f'__{op.__name__}__'\n    return set_function_name(_binop, op_name, cls)",
    "docstring": "A class method that returns a method that will correspond to an operator for an ExtensionArray subclass, by dispatching to the relevant operator defined on the individual elements of the ExtensionArray. Parameters ---------- op : function An operator that takes arguments op(a, b) coerce_to_dtype : bool, default True boolean indicating whether to attempt to convert the result to the underlying ExtensionArray dtype. If it's not possible to create a new ExtensionArray with the values, an ndarray is returned instead. Returns ------- Callable[[Any, Any], Union[ndarray, ExtensionArray]] A method that can be bound to a class. When used, the method receives the two arguments, one of which is the instance of this class, and should return an ExtensionArray or an ndarray. Returning an ndarray may be necessary when the result of the cannot be stored in the ExtensionArray. The dtype of the ndarray uses NumPy's normal inference rules. Examples -------- Given an ExtensionArray subclass called MyExtensionArray, use __add__ = cls._create_method(operator.add) in the class definition of MyExtensionArray to create the operator for addition, that will be based on the operator implementation of the underlying elements of the ExtensionArray",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\arrays\\base.py",
    "ast_data": "FunctionDef name:_create_method arg:cls arg:op arg:coerce_to_dtype arg:result_dtype arguments arg arg arg arg FunctionDef name:_binop arg:self arg:other arguments arg arg FunctionDef name:convert_values arg:param arguments arg If BoolOp Call Call Assign Assign Call Return return:yes If Call Return return:yes Assign Assign Call Assign Call Call FunctionDef name:_maybe_convert arg:arr arguments arg If Assign Call If Call Call Assign Call Assign Call Return return:yes If Compare Assign Call Return return:yes Call Call Return return:yes Call Assign Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "mode",
    "source_code": "def mode(self, df, scale):\n    dim, df, scale = self._process_parameters(df, scale)\n    out = self._mode(dim, df, scale)\n    return _squeeze_output(out)",
    "docstring": "Mode of the inverse Wishart distribution. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float The Mode of the distribution",
    "type": "method",
    "file_path": "scipy\\scipy\\stats\\_multivariate.py",
    "ast_data": "FunctionDef name:mode arg:self arg:df arg:scale arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_make_indexable",
    "source_code": "def _make_indexable(iterable):\n    if sp.issparse(iterable):\n        return iterable.tocsr()\n    elif hasattr(iterable, '__getitem__') or hasattr(iterable, 'iloc'):\n        return iterable\n    elif iterable is None:\n        return iterable\n    return np.array(iterable)",
    "docstring": "Ensure iterable supports indexing or convert to an indexable variant. Convert sparse matrices to csr and other non-indexable iterable to arrays. Let and indexable objects (e.g. pandas dataframes) pass unchanged. Parameters ---------- iterable : {list, dataframe, ndarray, sparse matrix} or None Object to be converted to an indexable iterable.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:_make_indexable arg:iterable arguments arg If Call Return return:yes Call If BoolOp Call Call Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "serialize_json",
    "source_code": "def serialize_json(self, header_obj, payload, key):\n    payload_segment = json_b64encode(payload)\n\n    def _sign(jws_header):\n        self._validate_private_headers(jws_header)\n        _alg, _key = self._prepare_algorithm_key(jws_header, payload, key)\n        protected_segment = json_b64encode(jws_header.protected)\n        signing_input = b'.'.join([protected_segment, payload_segment])\n        signature = urlsafe_b64encode(_alg.sign(signing_input, _key))\n        rv = {'protected': to_unicode(protected_segment), 'signature': to_unicode(signature)}\n        if jws_header.header is not None:\n            rv['header'] = jws_header.header\n        return rv\n    if isinstance(header_obj, dict):\n        data = _sign(JWSHeader.from_dict(header_obj))\n        data['payload'] = to_unicode(payload_segment)\n        return data\n    signatures = [_sign(JWSHeader.from_dict(h)) for h in header_obj]\n    return {'payload': to_unicode(payload_segment), 'signatures': signatures}",
    "docstring": "Generate a JWS JSON Serialization. The JWS JSON Serialization represents digitally signed or MACed content as a JSON object, per _. :param header_obj: A dict/list of header :param payload: A string/dict of payload :param key: Private key used to generate signature :return: JWSObject Example `` of JWS JSON Serialization:: { \"protected: {\"alg\": \"HS256\"}, \"header\": {\"kid\": \"jose\"} } Pass a dict to generate flattened JSON Serialization, pass a list of header dict to generate standard JSON Serialization.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7515\\jws.py",
    "ast_data": "FunctionDef name:serialize_json arg:self arg:header_obj arg:payload arg:key arguments arg arg arg arg Assign Call FunctionDef name:_sign arg:jws_header arguments arg Call Assign Call Assign Call Assign Call Assign Call Call Assign Call Call If Compare Assign Return return:yes If Call Assign Call Call Assign Call Return return:yes Assign Call Call Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "_fromnxfunction_args",
    "source_code": "class _fromnxfunction_args(_fromnxfunction):\n\n    def __call__(self, *args, **params):\n        func = getattr(np, self.__name__)\n        arrays = []\n        args = list(args)\n        while len(args) > 0 and issequence(args[0]):\n            arrays.append(args.pop(0))\n        res = []\n        for x in arrays:\n            _d = func(np.asarray(x), *args, **params)\n            _m = func(getmaskarray(x), *args, **params)\n            res.append(masked_array(_d, mask=_m))\n        if len(arrays) == 1:\n            return res[0]\n        return res",
    "docstring": "A version of that is called with multiple array arguments. The first non-array-like input marks the beginning of the arguments that are passed verbatim for both the data and mask calls. Array arguments are processed independently and the results are returned in a list. If only one array is found, the return value is just the processed array instead of a list.",
    "type": "class",
    "file_path": "numpy\\numpy\\ma\\extras.py",
    "ast_data": "ClassDef name:_fromnxfunction_args FunctionDef name:__call__ arg:self arguments arg arg arg Assign Call Assign Assign Call While BoolOp Compare Call Call Call Call Assign For Assign Call Call Assign Call Call Call Call If Compare Call Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "valfilter",
    "source_code": "def valfilter(predicate, d, factory=dict):\n    rv = factory()\n    for k, v in d.items():\n        if predicate(v):\n            rv[k] = v\n    return rv",
    "docstring": "Filter items in dictionary by value >>> iseven = lambda x: x % 2 == 0 >>> d = {1: 2, 2: 3, 3: 4, 4: 5} >>> valfilter(iseven, d) {1: 2, 3: 4} See Also: keyfilter itemfilter valmap",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py",
    "ast_data": "FunctionDef name:valfilter arg:predicate arg:d arg:factory arguments arg arg arg Assign Call For Call If Call Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "ShardedState",
    "source_code": "class ShardedState(Enum):\n    SHARDED = auto()\n    SHARDED_POST_FORWARD = auto()\n    UNSHARDED = auto()",
    "docstring": "- ``: The unsharded parameter is registered to the module. Both it and the sharded parameter contribute to parameter memory.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param.py",
    "ast_data": "ClassDef name:ShardedState Assign Call Assign Call Assign Call"
  },
  {
    "library": "scipy",
    "name": "PermFunction01",
    "source_code": "class PermFunction01(Benchmark):\n    change_dimensionality = True\n\n    def __init__(self, dimensions=2):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([-self.N] * self.N, [self.N + 1] * self.N))\n        self.global_optimum = [list(range(1, self.N + 1))]\n        self.fglob = 0.0\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        b = 0.5\n        k = atleast_2d(arange(self.N) + 1).T\n        j = atleast_2d(arange(self.N) + 1)\n        s = (j ** k + b) * ((x / j) ** k - 1)\n        return sum(sum(s, axis=1) ** 2)",
    "docstring": "PermFunction 1 objective function. This class defines the PermFunction1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{PermFunction01}}(x) = \\sum_{k=1}^n \\left\\{ \\sum_{j=1}^n (j^k + \\beta) \\left[ \\left(\\frac{x_j}{j}\\right)^k - 1 \\right] \\right\\}^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. Munich Personal RePEc Archive, 2006, 1005 TODO: line 560",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:PermFunction01 Assign FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Assign Call Call Assign Call Call Assign Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "bias_add_v1",
    "source_code": "def bias_add_v1(value, bias, name=None):\n    with ops.name_scope(name, 'BiasAddV1', [value, bias]) as name:\n        value = ops.convert_to_tensor(value, name='input')\n        bias = ops.convert_to_tensor(bias, dtype=value.dtype, name='bias')\n        return gen_nn_ops.bias_add_v1(value, bias, name=name)",
    "docstring": "Adds to . This is a deprecated version of bias_add and will soon to be removed. This is (mostly) a special case of where is restricted to 1-D. Broadcasting is supported, so may have any number of dimensions. Unlike , the type of is allowed to differ from in the case where both types are quantized. Args: value: A with type , , , , , , , , or . bias: A 1-D with size matching the last dimension of . Must be the same type as unless is a quantized type, in which case a different quantized type may be used. name: A name for the operation (optional). Returns: A with the same type as .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py",
    "ast_data": "FunctionDef name:bias_add_v1 arg:value arg:bias arg:name arguments arg arg arg With Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_ensure_c_contiguous",
    "source_code": "def _ensure_c_contiguous(self):\n    if not self.x.flags.c_contiguous:\n        self.x = self.x.copy()\n    if not self.c.flags.c_contiguous:\n        self.c = self.c.copy()",
    "docstring": "c and x may be modified by the user. The Cython code expects that they are C contiguous.",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:_ensure_c_contiguous arg:self arguments arg If Assign Call If Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "read_execution_stack_trace",
    "source_code": "def read_execution_stack_trace(self, execution):\n    host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]\n    return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids])",
    "docstring": "Read the stack trace of a given Execution object. Args: execution: The Execution object of interest. Returns: 1. The host name. 2. The stack trace, as a list of (file_path, lineno, func) tuples.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py",
    "ast_data": "FunctionDef name:read_execution_stack_trace arg:self arg:execution arguments arg arg Assign Return return:yes"
  },
  {
    "library": "numpy",
    "name": "fromstring",
    "source_code": "def fromstring(s, language=Language.C):\n    r = _FromStringWorker(language=language).parse(s)\n    if isinstance(r, Expr):\n        return r\n    raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`')",
    "docstring": "Create an expression from a string. This is a \"lazy\" parser, that is, only arithmetic operations are resolved, non-arithmetic operations are treated as symbols.",
    "type": "function",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "FunctionDef name:fromstring arg:s arg:language arguments arg arg Assign Call Call If Call Return return:yes Raise Call"
  },
  {
    "library": "pandas",
    "name": "isna",
    "source_code": "@final\ndef isna(self) -> npt.NDArray[np.bool_]:\n    return self._isnan",
    "docstring": "Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as `numpy.NaNpd.NaT''numpy.infNaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex( ... [pd.Timestamp(\"1940-04-25\"), pd.Timestamp(\"\"), None, pd.NaT] ... ) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[s]', freq=None) >>> idx.isna() array([False, True, True, True])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:isna arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_saved_model_tag_sets",
    "source_code": "def get_saved_model_tag_sets(saved_model_dir):\n    saved_model = read_saved_model(saved_model_dir)\n    all_tags = []\n    for meta_graph_def in saved_model.meta_graphs:\n        all_tags.append(list(meta_graph_def.meta_info_def.tags))\n    return all_tags",
    "docstring": "Retrieves all the tag-sets available in the SavedModel. Args: saved_model_dir: Directory containing the SavedModel. Returns: List of all tag-sets in the SavedModel, where a tag-set is represented as a list of strings.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\tools\\saved_model_utils.py",
    "ast_data": "FunctionDef name:get_saved_model_tag_sets arg:saved_model_dir arguments arg Assign Call Assign For Call Call Return return:yes"
  },
  {
    "library": "authlib",
    "name": "save_token",
    "source_code": "def save_token(self, token):\n    return self.server.save_token(token, self.request)",
    "docstring": "A method to save token into database.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\base.py",
    "ast_data": "FunctionDef name:save_token arg:self arg:token arguments arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_repr_html_",
    "source_code": "def _repr_html_(self):\n    png_bytes = self._repr_png_()\n    png_base64 = base64.b64encode(png_bytes).decode('ascii')\n\n    def color_block(color):\n        hex_color = to_hex(color, keep_alpha=True)\n        return f'<div title=\"{hex_color}\" style=\"display: inline-block; width: 1em; height: 1em; margin: 0; vertical-align: middle; border: 1px solid #555; background-color: {hex_color};\"></div>'\n    return f'<div style=\"vertical-align: middle;\"><strong>{self.name}</strong> </div><div class=\"cmap\"><img alt=\"{self.name} colormap\" title=\"{self.name}\" style=\"border: 1px solid #555;\" src=\"data:image/png;base64,{png_base64}\"></div><div style=\"vertical-align: middle; max-width: {_REPR_PNG_SIZE[0] + 2}px; display: flex; justify-content: space-between;\"><div style=\"float: left;\">{color_block(self.get_under())} under</div><div style=\"margin: 0 auto; display: inline-block;\">bad {color_block(self.get_bad())}</div><div style=\"float: right;\">over {color_block(self.get_over())}</div></div>'",
    "docstring": "Generate an HTML representation of the Colormap.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:_repr_html_ arg:self arguments arg Assign Call Assign Call Call FunctionDef name:color_block arg:color arguments arg Assign Call Return return:yes Return return:yes Call Call Call Call Call Call"
  },
  {
    "library": "pandas",
    "name": "contains",
    "source_code": "@forbid_nonstring_types(['bytes'])\ndef contains(self, pat, case: bool=True, flags: int=0, na=lib.no_default, regex: bool=True):\n    if regex and re.compile(pat).groups:\n        warnings.warn('This pattern is interpreted as a regular expression, and has match groups. To actually get the groups, use str.extract.', UserWarning, stacklevel=find_stack_level())\n    result = self._data.array._str_contains(pat, case, flags, na, regex)\n    return self._wrap_result(result, fill_value=na, returns_string=False)",
    "docstring": "Test if pattern or regex is contained within a string of a Series or Index. Return boolean Series or Index based on whether a given pattern or regex is contained within a string of a Series or Index. Parameters ---------- pat : str Character sequence or regular expression. case : bool, default True If True, case sensitive. flags : int, default 0 (no flags) Flags to pass through to the re module, e.g. re.IGNORECASE. na : scalar, optional Fill value for missing values. The default depends on dtype of the array. For object-dtype, `casenaFalseNaNFalseboolobjectflagspatregexs2[1]s2[3]True`. However, '.0' as a regex matches any character followed by a 0. >>> s2 = pd.Series([\"40\", \"40.0\", \"41\", \"41.0\", \"35\"]) >>> s2.str.contains(\".0\", regex=True) 0 True 1 True 2 False 3 True 4 False dtype: bool",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\strings\\accessor.py",
    "ast_data": "FunctionDef name:contains arg:self arg:pat arg:case arg:flags arg:na arg:regex arguments arg arg arg arg arg arg If BoolOp Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_benchmark_defun",
    "source_code": "def _benchmark_defun(self):\n\n    @def_function.function\n    def cond_fn(x):\n        return self._create_cond(x)\n    for _ in range(self.NUM_WARM_UP_ITERS):\n        cond_fn(0.0)\n    start_time = time.time()\n    for _ in range(self.NUM_ITERS):\n        cond_fn(0.0)\n    self.report_benchmark(wall_time=time.time() - start_time, iters=self.NUM_ITERS)",
    "docstring": "Benchmarks cond in a defun.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops_benchmark.py",
    "ast_data": "FunctionDef name:_benchmark_defun arg:self arguments arg FunctionDef name:cond_fn arg:x arguments arg Return return:yes Call For Call Call Assign Call For Call Call Call Call"
  },
  {
    "library": "scrapy",
    "name": "add_options",
    "source_code": "def add_options(self, parser: argparse.ArgumentParser) -> None:\n    group = parser.add_argument_group(title='Global Options')\n    group.add_argument('--logfile', metavar='FILE', help='log file. if omitted stderr will be used')\n    group.add_argument('-L', '--loglevel', metavar='LEVEL', default=None, help=f'log level (default: {self.settings['LOG_LEVEL']})')\n    group.add_argument('--nolog', action='store_true', help='disable logging completely')\n    group.add_argument('--profile', metavar='FILE', default=None, help='write python cProfile stats to FILE')\n    group.add_argument('--pidfile', metavar='FILE', help='write process ID to FILE')\n    group.add_argument('-s', '--set', action='append', default=[], metavar='NAME=VALUE', help='set/override setting (may be repeated)')\n    group.add_argument('--pdb', action='store_true', help='enable pdb on failure')",
    "docstring": "Populate option parse with options available for this command",
    "type": "method",
    "file_path": "scrapy\\scrapy\\commands\\__init__.py",
    "ast_data": "FunctionDef name:add_options arg:self arg:parser arguments arg arg Assign Call Call Call Call Call Call Call Call"
  },
  {
    "library": "cryptography",
    "name": "finalize",
    "source_code": "@abc.abstractmethod\ndef finalize(self) -> bytes:\n    pass",
    "docstring": "Finalize the padding, returns bytes.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\padding.py",
    "ast_data": "FunctionDef name:finalize arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "resource_variables_enabled",
    "source_code": "@tf_export(v1=['resource_variables_enabled'])\ndef resource_variables_enabled() -> bool:\n    return _DEFAULT_USE_RESOURCE",
    "docstring": "Returns if resource variables are enabled. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variables_toggle.py",
    "ast_data": "FunctionDef name:resource_variables_enabled arguments Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_geometry_type",
    "source_code": "def get_geometry_type(self, table_name, description):\n    with self.connection.cursor() as cursor:\n        cursor.execute('\\n                SELECT t.coord_dimension, t.srid, t.type FROM (\\n                    SELECT * FROM geometry_columns\\n                    UNION ALL\\n                    SELECT * FROM geography_columns\\n                ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\\n            ', (table_name, description.name))\n        row = cursor.fetchone()\n        if not row:\n            raise Exception('Could not find a geometry or geography column for \"%s\".\"%s\"' % (table_name, description.name))\n        dim, srid, field_type = row\n        field_type = OGRGeomType(field_type).django\n        field_params = {}\n        if self.postgis_oid_lookup.get(description.type_code) == 'geography':\n            field_params['geography'] = True\n        if srid != 4326:\n            field_params['srid'] = srid\n        if dim != 2:\n            field_params['dim'] = dim\n    return (field_type, field_params)",
    "docstring": "The geometry type OID used by PostGIS does not indicate the particular type of field that a geometry column is (e.g., whether it's a PointField or a PolygonField). Thus, this routine queries the PostGIS metadata tables to determine the geometry type.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\introspection.py",
    "ast_data": "FunctionDef name:get_geometry_type arg:self arg:table_name arg:description arguments arg arg arg With Call Call Assign Call If Raise Call Assign Assign Call Assign If Compare Call Assign If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_tf_tensor_list_append",
    "source_code": "def _tf_tensor_list_append(list_, x):\n\n    def empty_list_of_elements_like_x():\n        tensor_x = ops.convert_to_tensor(x)\n        return list_ops.empty_tensor_list(element_shape=array_ops.shape(tensor_x), element_dtype=tensor_x.dtype)\n    list_ = cond.cond(list_ops.tensor_list_length(list_) > 0, lambda: list_, empty_list_of_elements_like_x)\n    return list_ops.tensor_list_push_back(list_, x)",
    "docstring": "Overload of list_append that stages a Tensor list write.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py",
    "ast_data": "FunctionDef name:_tf_tensor_list_append arg:list_ arg:x arguments arg arg FunctionDef name:empty_list_of_elements_like_x arguments Assign Call Return return:yes Call Call Assign Call Compare Call arguments Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "checkpoint_exists",
    "source_code": "@deprecation.deprecated(date=None, instructions='Use standard file APIs to check for files with this prefix.')\n@tf_export(v1=['train.checkpoint_exists'])\ndef checkpoint_exists(checkpoint_prefix):\n    return checkpoint_exists_internal(checkpoint_prefix)",
    "docstring": "Checks whether a V1 or V2 checkpoint exists with the specified prefix. This is the recommended way to check if a checkpoint exists, since it takes into account the naming difference between V1 and V2 formats. Args: checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking priority. Typically the result of or that of , regardless of sharded/non-sharded or V1/V2. Returns: A bool, true if a checkpoint referred to by exists.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py",
    "ast_data": "FunctionDef name:checkpoint_exists arg:checkpoint_prefix arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "CheckpointInitialValue",
    "source_code": "@tf_export('__internal__.tracking.CheckpointInitialValue', v1=[])\nclass CheckpointInitialValue(object):\n\n    def __init__(self, checkpoint_position, shape=None, shard_info=None):\n        if shard_info:\n            full_shape_str = ' '.join(('%d' % d for d in shape)) + ' '\n            slice_spec = ':'.join(('%d,%d' % (o, s) for o, s in zip(shard_info.offset, shard_info.shape)))\n            shape_and_slice = full_shape_str + slice_spec\n        else:\n            shape_and_slice = ''\n        self.wrapped_value = checkpoint_position.value_tensors({VARIABLE_VALUE_KEY: shape_and_slice})[VARIABLE_VALUE_KEY]\n        self._checkpoint_position = checkpoint_position\n\n    def __tf_tensor__(self, dtype=None, name=None):\n        del dtype\n        del name\n        return self.wrapped_value\n\n    @property\n    def checkpoint_position(self):\n        return self._checkpoint_position",
    "docstring": "Tensor wrapper for managing update UIDs in . When supplied as an initial value, objects of this type let a (, , etc.) know the UID of the restore the initial value came from. This allows deferred restorations to be sequenced in the order the user specified them, and lets us fall back on assignment if an initial value is not set (e.g. due to a custom getter interfering). See comments in _add_variable_with_custom_getter for more information about how is used.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\trackable\\base.py",
    "ast_data": "ClassDef name:CheckpointInitialValue FunctionDef name:__init__ arg:self arg:checkpoint_position arg:shape arg:shard_info arguments arg arg arg arg If Assign Call Assign Call Call Assign Assign Assign Call Assign FunctionDef name:__tf_tensor__ arg:self arg:dtype arg:name arguments arg arg arg Return return:yes FunctionDef name:checkpoint_position arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "set_verbosity",
    "source_code": "@tf_export(v1=['logging.set_verbosity'])\ndef set_verbosity(v):\n    get_logger().setLevel(v)",
    "docstring": "Sets the threshold for what messages will be logged.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py",
    "ast_data": "FunctionDef name:set_verbosity arg:v arguments arg Call Call Call"
  },
  {
    "library": "scipy",
    "name": "MikotaM",
    "source_code": "class MikotaM(LinearOperator):\n\n    def __init__(self, shape, dtype=np.float64):\n        self.shape = shape\n        self.dtype = dtype\n        super().__init__(dtype, shape)\n\n    def _diag(self):\n        return (1.0 / np.arange(1, self.shape[0] + 1)).astype(self.dtype)\n\n    def tobanded(self):\n        return self._diag()\n\n    def tosparse(self):\n        from scipy.sparse import diags_array\n        return diags_array([self._diag()], offsets=[0], shape=self.shape, dtype=self.dtype)\n\n    def toarray(self):\n        return np.diag(self._diag()).astype(self.dtype)\n\n    def _matvec(self, x):\n        x = x.reshape(self.shape[0], -1)\n        return self._diag()[:, np.newaxis] * x\n\n    def _matmat(self, x):\n        return self._matvec(x)\n\n    def _adjoint(self):\n        return self\n\n    def _transpose(self):\n        return self",
    "docstring": "Construct a mass matrix in various formats of Mikota pair. The mass matrix is square real diagonal positive definite with entries that are reciprocal to integers. Parameters ---------- shape : tuple of int The shape of the matrix. dtype : dtype Numerical type of the array. Default is ``. Methods ------- toarray() Construct a dense array from Mikota data tosparse() Construct a sparse array from Mikota data tobanded() The format for banded symmetric matrices, i.e., (1, n) ndarray with the main diagonal.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "ClassDef name:MikotaM FunctionDef name:__init__ arg:self arg:shape arg:dtype arguments arg arg arg Assign Assign Call Call FunctionDef name:_diag arg:self arguments arg Return return:yes Call Call FunctionDef name:tobanded arg:self arguments arg Return return:yes Call FunctionDef name:tosparse arg:self arguments arg Return return:yes Call Call FunctionDef name:toarray arg:self arguments arg Return return:yes Call Call Call FunctionDef name:_matvec arg:self arg:x arguments arg arg Assign Call Return return:yes Call FunctionDef name:_matmat arg:self arg:x arguments arg arg Return return:yes Call FunctionDef name:_adjoint arg:self arguments arg Return return:yes FunctionDef name:_transpose arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_unitless_position",
    "source_code": "def get_unitless_position(self):\n    x = float(self.convert_xunits(self._x))\n    y = float(self.convert_yunits(self._y))\n    return (x, y)",
    "docstring": "Return the (x, y) unitless position of the text.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\text.py",
    "ast_data": "FunctionDef name:get_unitless_position arg:self arguments arg Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "is_interactive",
    "source_code": "def is_interactive():\n    return rcParams['interactive']",
    "docstring": "Return whether to redraw after every plotting command. .. note:: This function is only intended for use in backends. End users should use instead.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\__init__.py",
    "ast_data": "FunctionDef name:is_interactive arguments Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "eval",
    "source_code": "def eval(self, session=None):\n    if context.executing_eagerly():\n        raise RuntimeError('This operation is not supported when eager execution is enabled.')\n    return self._graph_element.eval(session=session)",
    "docstring": "Evaluates and returns the value of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:eval arg:self arg:session arguments arg arg If Call Raise Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "_ensure_constraint",
    "source_code": "def _ensure_constraint(self, trial):\n    mask = np.bitwise_or(trial > 1, trial < 0)\n    if (oob := np.count_nonzero(mask)):\n        trial[mask] = self.random_number_generator.uniform(size=oob)",
    "docstring": "Make sure the parameters lie between the limits.",
    "type": "method",
    "file_path": "scipy\\scipy\\optimize\\_differentialevolution.py",
    "ast_data": "FunctionDef name:_ensure_constraint arg:self arg:trial arguments arg arg Assign Call Compare Compare If Call Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "interpolated",
    "source_code": "def interpolated(self, steps):\n    if steps == 1 or len(self) == 0:\n        return self\n    if self.codes is not None and self.MOVETO in self.codes[1:]:\n        return self.make_compound_path(*(p.interpolated(steps) for p in self._iter_connected_components()))\n    if self.codes is not None and self.CLOSEPOLY in self.codes and (not np.all(self.vertices[self.codes == self.CLOSEPOLY] == self.vertices[0])):\n        vertices = self.vertices.copy()\n        vertices[self.codes == self.CLOSEPOLY] = vertices[0]\n    else:\n        vertices = self.vertices\n    vertices = simple_linear_interpolation(vertices, steps)\n    codes = self.codes\n    if codes is not None:\n        new_codes = np.full((len(codes) - 1) * steps + 1, Path.LINETO, dtype=self.code_type)\n        new_codes[0::steps] = codes\n    else:\n        new_codes = None\n    return Path(vertices, new_codes)",
    "docstring": "Return a new path with each segment divided into *steps* parts. Codes other than , , and are not handled correctly. Parameters ---------- steps : int The number of segments in the new path for each in the original. Returns ------- Path The interpolated path.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\path.py",
    "ast_data": "FunctionDef name:interpolated arg:self arg:steps arguments arg arg If BoolOp Compare Compare Call Return return:yes If BoolOp Compare Compare Return return:yes Call Call Call If BoolOp Compare Compare Call Compare Compare Assign Call Assign Compare Assign Assign Call Assign If Compare Assign Call Call Assign Assign Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_uv_as_tensors",
    "source_code": "def _get_uv_as_tensors(self):\n    u = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.u)\n    if self.v is self.u:\n        v = u\n    else:\n        v = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.v)\n    return (u, v)",
    "docstring": "Get (self.u, self.v) as tensors (in case they were refs).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_low_rank_update.py",
    "ast_data": "FunctionDef name:_get_uv_as_tensors arg:self arguments arg Assign Call If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "is_masked",
    "source_code": "def is_masked(x):\n    m = getmask(x)\n    if m is nomask:\n        return False\n    elif m.any():\n        return True\n    return False",
    "docstring": "Determine whether input has masked values. Accepts any object as input, but always returns False unless the input is a MaskedArray containing masked values. Parameters ---------- x : array_like Array to check for masked values. Returns ------- result : bool True if is a MaskedArray with masked values, False otherwise. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x masked_array(data=[--, 1, --, 2, 3], mask=[ True, False, True, False, False], fill_value=0) >>> ma.is_masked(x) True >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) >>> x masked_array(data=[0, 1, 0, 2, 3], mask=False, fill_value=42) >>> ma.is_masked(x) False Always returns False if isn't a MaskedArray. >>> x = [False, True, False] >>> ma.is_masked(x) False >>> x = 'a string' >>> ma.is_masked(x) False",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:is_masked arg:x arguments arg Assign Call If Compare Return return:yes If Call Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "inner_rank",
    "source_code": "@property\ndef inner_rank(self):\n    return tensor_shape.dimension_value(self._static_inner_shape.rank)",
    "docstring": "The rank of inner_shape.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py",
    "ast_data": "FunctionDef name:inner_rank arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_fused_matmul_reduce_scatter",
    "source_code": "@torch.library.impl(lib, 'fused_matmul_reduce_scatter', 'CUDA')\ndef _fused_matmul_reduce_scatter(A: torch.Tensor, B: torch.Tensor, reduce_op: str, scatter_dim: int, group_name: str) -> torch.Tensor:\n    if _is_test_mode:\n        return _fused_matmul_reduce_scatter_fallback(A, B, reduce_op, scatter_dim, group_name)\n    with torch.profiler.record_function('fused_matmul_reduce_scatter'):\n        return _fused_matmul_reduce_scatter_impl(mm_out_op=torch.ops.aten.mm.out, A=A, B=B, kwargs={}, out_dtype=A.dtype, reduce_op=reduce_op, scatter_dim=scatter_dim, group_name=group_name)",
    "docstring": "Perform the following logic with micro-pipelined computation and communication: reduce_scatter_tensor(A @ B, reduce_op, scatter_dim, group_name) Optimal stride order for A - if A.movedim(scatter_dim, 0) is contiguous, no extra copy is required for input layout transformation. Otherwise A needs to be copied once.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:_fused_matmul_reduce_scatter arg:A arg:B arg:reduce_op arg:scatter_dim arg:group_name arguments arg arg arg arg arg If Return return:yes Call With Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "_inspect_cell_array",
    "source_code": "def _inspect_cell_array(ndarray):\n    elem_list = []\n    for sub_elem in ndarray:\n        if isinstance(sub_elem, mat_struct):\n            elem_list.append(_matstruct_to_dict(sub_elem))\n        elif _has_struct(sub_elem):\n            elem_list.append(_inspect_cell_array(sub_elem))\n        else:\n            elem_list.append(sub_elem)\n    return elem_list",
    "docstring": "Construct lists from cell arrays (loaded as numpy ndarrays), recursing into items if they contain mat_struct objects.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py",
    "ast_data": "FunctionDef name:_inspect_cell_array arg:ndarray arguments arg Assign For If Call Call Call If Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_non_slot_variables",
    "source_code": "def _non_slot_variables(self):\n    return self._non_slot_dict.values()",
    "docstring": "Additional variables created by the . Returns: A list or tuple of variables.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_non_slot_variables arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "code",
    "source_code": "@property\ndef code(self) -> str:\n    if not hasattr(self, '_code'):\n        raise RuntimeError('Code has not been generated! Please report a bug to PyTorch')\n    return self._code",
    "docstring": "Return the Python code generated from the ``.",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\graph_module.py",
    "ast_data": "FunctionDef name:code arg:self arguments arg If Call Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "on_test_begin",
    "source_code": "@doc_controls.for_subclass_implementers\ndef on_test_begin(self, logs=None):\n    pass",
    "docstring": "Called at the beginning of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py",
    "ast_data": "FunctionDef name:on_test_begin arg:self arg:logs arguments arg arg"
  },
  {
    "library": "scikit-learn",
    "name": "_estimate_means",
    "source_code": "def _estimate_means(self, nk, xk):\n    self.mean_precision_ = self.mean_precision_prior_ + nk\n    self.means_ = (self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk) / self.mean_precision_[:, np.newaxis]",
    "docstring": "Estimate the parameters of the Gaussian distribution. Parameters ---------- nk : array-like of shape (n_components,) xk : array-like of shape (n_components, n_features)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_bayesian_mixture.py",
    "ast_data": "FunctionDef name:_estimate_means arg:self arg:nk arg:xk arguments arg arg arg Assign Assign"
  },
  {
    "library": "tensorflow",
    "name": "download",
    "source_code": "def download(directory, filename):\n    filepath = os.path.join(directory, filename)\n    if tf.io.gfile.exists(filepath):\n        return filepath\n    if not tf.io.gfile.exists(directory):\n        tf.io.gfile.makedirs(directory)\n    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n    _, zipped_filepath = tempfile.mkstemp(suffix='.gz')\n    print('Downloading %s to %s' % (url, zipped_filepath))\n    urllib.request.urlretrieve(url, zipped_filepath)\n    with gzip.open(zipped_filepath, 'rb') as f_in, tf.io.gfile.Gfile(filepath, 'wb') as f_out:\n        shutil.copyfileobj(f_in, f_out)\n    os.remove(zipped_filepath)\n    return filepath",
    "docstring": "Download (and unzip) a file from the MNIST dataset if not already done.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\lite\\tutorials\\dataset.py",
    "ast_data": "FunctionDef name:download arg:directory arg:filename arguments arg arg Assign Call If Call Return return:yes If Call Call Assign Assign Call Call Call With Call Call Call Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "inv",
    "source_code": "@_apply_over_batch(('a', 2))\ndef inv(a, overwrite_a=False, check_finite=True):\n    a1 = _asarray_validated(a, check_finite=check_finite)\n    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:\n        raise ValueError('expected square matrix')\n    if a1.size == 0:\n        dt = inv(np.eye(2, dtype=a1.dtype)).dtype\n        return np.empty_like(a1, dtype=dt)\n    overwrite_a = overwrite_a or _datacopied(a1, a)\n    getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri', 'getri_lwork'), (a1,))\n    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)\n    if info == 0:\n        lwork = _compute_lwork(getri_lwork, a1.shape[0])\n        lwork = int(1.01 * lwork)\n        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)\n    if info > 0:\n        raise LinAlgError('singular matrix')\n    if info < 0:\n        raise ValueError(f'illegal value in {-info}-th argument of internal getrf|getri')\n    return inv_a",
    "docstring": "Compute the inverse of a matrix. Parameters ---------- a : array_like Square matrix to be inverted. overwrite_a : bool, optional Discard data in (may improve performance). Default is False. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- ainv : ndarray Inverse of the matrix . Raises ------ LinAlgError If is singular. ValueError If is not square, or not 2D. Examples -------- >>> import numpy as np >>> from scipy import linalg >>> a = np.array([[1., 2.], [3., 4.]]) >>> linalg.inv(a) array([[-2. , 1. ], [ 1.5, -0.5]]) >>> np.dot(a, linalg.inv(a)) array([[ 1., 0.], [ 0., 1.]])",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\_basic.py",
    "ast_data": "FunctionDef name:inv arg:a arg:overwrite_a arg:check_finite arguments arg arg arg Assign Call If BoolOp Compare Call Compare Raise Call If Compare Assign Call Call Return return:yes Call Assign BoolOp Call Assign Call Assign Call If Compare Assign Call Assign Call Assign Call If Compare Raise Call If Compare Raise Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "get_well_known_url",
    "source_code": "def get_well_known_url(issuer, external=False, suffix='oauth-authorization-server'):\n    parsed = urlparse.urlparse(issuer)\n    path = parsed.path\n    if path and path != '/':\n        url_path = f'/.well-known/{suffix}{path}'\n    else:\n        url_path = f'/.well-known/{suffix}'\n    if not external:\n        return url_path\n    return parsed.scheme + '://' + parsed.netloc + url_path",
    "docstring": "Get well-known URI with issuer via _. .. _: :param issuer: URL of the issuer :param external: return full external url or not :param suffix: well-known URI suffix for RFC8414 :return: URL",
    "type": "function",
    "file_path": "authlib\\authlib\\oauth2\\rfc8414\\well_known.py",
    "ast_data": "FunctionDef name:get_well_known_url arg:issuer arg:external arg:suffix arguments arg arg arg Assign Call Assign If BoolOp Compare Assign Assign If Return return:yes Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "parameters",
    "source_code": "@abc.abstractmethod\ndef parameters(self) -> DSAParameters:\n    pass",
    "docstring": "The DSAParameters object associated with this public key.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py",
    "ast_data": "FunctionDef name:parameters arg:self arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_from_local_devices",
    "source_code": "@classmethod\ndef _from_local_devices(cls, devices, communication_options=None):\n    obj = cls(communication_options=communication_options)\n    obj.extended._initialize_local(tfconfig_cluster_resolver.TFConfigClusterResolver(), devices=devices)\n    return obj",
    "docstring": "A convenience method to create an object with a list of devices.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py",
    "ast_data": "FunctionDef name:_from_local_devices arg:cls arg:devices arg:communication_options arguments arg arg arg Assign Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "get_feature_names_out",
    "source_code": "def get_feature_names_out(self, input_features=None):\n    check_is_fitted(self, 'n_features_in_')\n    feature_names = _check_feature_names_in(self, input_features)\n    if self.target_type_ == 'multiclass':\n        feature_names = [f'{feature_name}_{class_name}' for feature_name in feature_names for class_name in self.classes_]\n        return np.asarray(feature_names, dtype=object)\n    else:\n        return feature_names",
    "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. is used unless it is not defined, in which case the following input feature names are generated: . When is \"multiclass\" the names are of the format '_'.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_target_encoder.py",
    "ast_data": "FunctionDef name:get_feature_names_out arg:self arg:input_features arguments arg arg Call Assign Call If Compare Assign Return return:yes Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_transform",
    "source_code": "def get_transform(self):\n    return self.aux_transform + self.ref_offset_transform + self.offset_transform",
    "docstring": "Return the :class: applied to the children",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py",
    "ast_data": "FunctionDef name:get_transform arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "find_coalesced_group",
    "source_code": "def find_coalesced_group(pg_name: str, entries: list[dict[str, Any]], _pg_guids: dict[tuple[str, int], str], rank: int) -> list[tuple[int, dict[str, Any]]]:\n    found = []\n    collective_seq_id = None\n    for i, e in enumerate(entries):\n        if _pg_guids[e['process_group'][0], rank] != pg_name:\n            continue\n        elif collective_seq_id is None:\n            collective_seq_id = e['p2p_seq_id'] if e['is_p2p'] else e['collective_seq_id']\n            found.append((i, e))\n        elif not e['is_p2p'] and e['collective_seq_id'] == collective_seq_id:\n            found.append((i, e))\n        elif e['is_p2p'] and e['p2p_seq_id'] == collective_seq_id:\n            found.append((i, e))\n        else:\n            break\n    if len(found) > 1:\n        assert found[-1][1]['profiling_name'] == 'nccl:coalesced'\n        return found\n    return []",
    "docstring": "Given a list of entries, if the collective_seq_id of the first entry matches that of subsequent ones, build an return a list of entries terminating in a 'coalesced' op entry all sharing a collective_seq_id",
    "type": "function",
    "file_path": "pytorch\\tools\\flight_recorder\\components\\utils.py",
    "ast_data": "FunctionDef name:find_coalesced_group arg:pg_name arg:entries arg:_pg_guids arg:rank arguments arg arg arg arg Assign Assign For Call If Compare If Compare Assign Call If BoolOp Compare Call If BoolOp Compare Call If Compare Call Compare Return return:yes Return return:no"
  },
  {
    "library": "numpy",
    "name": "__getstate__",
    "source_code": "def __getstate__(self):\n    cf = 'CF'[self.flags.fnc]\n    data_state = super().__reduce__()[2]\n    return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)",
    "docstring": "Return the internal state of the masked array, for pickling purposes.",
    "type": "method",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:__getstate__ arg:self arguments arg Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X",
    "source_code": "def _check_X(self, X, ensure_all_finite=True):\n    if not (hasattr(X, 'iloc') and getattr(X, 'ndim', 0) == 2):\n        X_temp = check_array(X, dtype=None, ensure_all_finite=ensure_all_finite)\n        if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):\n            X = check_array(X, dtype=object, ensure_all_finite=ensure_all_finite)\n        else:\n            X = X_temp\n        needs_validation = False\n    else:\n        needs_validation = ensure_all_finite\n    n_samples, n_features = X.shape\n    X_columns = []\n    for i in range(n_features):\n        Xi = _safe_indexing(X, indices=i, axis=1)\n        Xi = check_array(Xi, ensure_2d=False, dtype=None, ensure_all_finite=needs_validation)\n        X_columns.append(Xi)\n    return (X_columns, n_samples, n_features)",
    "docstring": "Perform custom check_array: - convert list of strings to object dtype - check for missing values for object dtype data (check_array does not do that) - return list of features (arrays): this list of features is constructed feature by feature to preserve the data types of pandas DataFrame columns, as otherwise information is lost and cannot be used, e.g. for the attribute.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py",
    "ast_data": "FunctionDef name:_check_X arg:self arg:X arg:ensure_all_finite arguments arg arg arg If BoolOp Call Compare Call Assign Call If BoolOp Call Call Assign Call Assign Assign Assign Assign Assign For Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_load_initial_epoch_from_ckpt",
    "source_code": "def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):\n    if self._training_state is not None:\n        return self._training_state.maybe_load_initial_epoch_from_ckpt(initial_epoch, mode)\n    return initial_epoch",
    "docstring": "Maybe load initial epoch from ckpt considering possible worker recovery. Refer to tensorflow/python/keras/distribute/worker_training_state.py for more information. Args: initial_epoch: The original initial_epoch user passes in in . mode: The mode for running . Returns: If the training is recovering from previous failure under multi-worker training setting, return the epoch the training is supposed to continue at. Otherwise, return the the user passes in.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py",
    "ast_data": "FunctionDef name:_maybe_load_initial_epoch_from_ckpt arg:self arg:initial_epoch arg:mode arguments arg arg arg If Compare Return return:yes Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "used_gradient_indices",
    "source_code": "def used_gradient_indices(formula: str) -> list[int]:\n    return [int(i) for i in re.findall(GRAD_INDEX_REGEX, formula)]",
    "docstring": "Determine a list of gradient indices (the i in grads[i]) that are used by the formula. >>> used_gradient_indices(\"foo(grads[0], grads[1])\") [0, 1]",
    "type": "function",
    "file_path": "pytorch\\tools\\autograd\\load_derivatives.py",
    "ast_data": "FunctionDef name:used_gradient_indices arg:formula arguments arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "scale_losses_by_sample_weight",
    "source_code": "def scale_losses_by_sample_weight(losses, sample_weight):\n    losses = math_ops.cast(losses, dtypes.float32)\n    sample_weight = math_ops.cast(sample_weight, dtypes.float32)\n    losses, _, sample_weight = squeeze_or_expand_dimensions(losses, None, sample_weight)\n    return math_ops.multiply(losses, sample_weight)",
    "docstring": "Scales loss values by the given sample weights. dimensions are updated to match with the dimension of if possible by using squeeze/expand/broadcast. Args: losses: Loss tensor. sample_weight: Sample weights tensor. Returns: scaled by with dtype float32.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\util.py",
    "ast_data": "FunctionDef name:scale_losses_by_sample_weight arg:losses arg:sample_weight arguments arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "solve",
    "source_code": "def solve(self, y=0.0, discontinuity=True, extrapolate=None):\n    if extrapolate is None:\n        extrapolate = self.extrapolate\n    self._ensure_c_contiguous()\n    if np.issubdtype(self.c.dtype, np.complexfloating):\n        raise ValueError('Root finding is only for real-valued polynomials')\n    y = float(y)\n    r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, y, bool(discontinuity), bool(extrapolate))\n    if self.c.ndim == 2:\n        return r[0]\n    else:\n        r2 = np.empty(prod(self.c.shape[2:]), dtype=object)\n        for ii, root in enumerate(r):\n            r2[ii] = root\n        return r2.reshape(self.c.shape[2:])",
    "docstring": "Find real solutions of the equation `self.extrapolatediscont`: >>> import numpy as np >>> from scipy.interpolate import PPoly >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2]) >>> pp.solve() array([-1., 1.])",
    "type": "method",
    "file_path": "scipy\\scipy\\interpolate\\_interpolate.py",
    "ast_data": "FunctionDef name:solve arg:self arg:y arg:discontinuity arg:extrapolate arguments arg arg arg arg If Compare Assign Call If Call Raise Call Assign Call Assign Call Call Call Call If Compare Return return:yes Assign Call Call For Call Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_stride_vars",
    "source_code": "def _stride_vars(self, index: Expr, vars: Sequence[sympy.Symbol], support_vars: Sequence[sympy.Symbol]) -> list[Expr]:\n    strides = []\n    index = self.simplify(index)\n    index = index - sympy_subs(index, {v: sympy.S.Zero for v in support_vars if v != 0})\n    for i in range(len(vars)):\n        index_dim = sympy_subs(index, {support_vars[j]: sympy.S.Zero for j in range(len(support_vars)) if vars[i] != support_vars[j] and support_vars[j] != 0})\n        v = vars[i]\n        if v == 0:\n            strides.append(sympy.S.Zero)\n        else:\n            strides.append(sympy_subs(index_dim, {v: sympy.S.One}) - sympy_subs(index_dim, {v: sympy.S.Zero}))\n    return strides",
    "docstring": "Convert an indexing expression back into strides NOTE: This is only valid if the index is a standard strided offset calculation. e.g. 10 * ModularIndexing(i0 + 1, 1, 2) would give a stride of -10 because the index wraps around after the first element",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:_stride_vars arg:self arg:index arg:vars arg:support_vars arguments arg arg arg arg Assign Assign Call Assign Call Compare For Call Call Assign Call Call Call BoolOp Compare Compare Assign If Compare Call Call Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_mover_key",
    "source_code": "@staticmethod\ndef _get_mover_key(feeder, handle):\n    return feeder.op.name + ';' + TensorHandle._get_reader_key(handle)",
    "docstring": "The graph key for mover.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py",
    "ast_data": "FunctionDef name:_get_mover_key arg:feeder arg:handle arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__rsub__",
    "source_code": "def __rsub__(self, other):\n    other = as_dimension(other)\n    if self._value is None or other.value is None:\n        return Dimension(None)\n    else:\n        return Dimension(other.value - self._value)",
    "docstring": "Returns the subtraction of from . Args: other: Another Dimension, or a value accepted by . Returns: A Dimension whose value is the subtraction of from .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py",
    "ast_data": "FunctionDef name:__rsub__ arg:self arg:other arguments arg arg Assign Call If BoolOp Compare Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "predict_step",
    "source_code": "def predict_step(self, data):\n    data = data_adapter.expand_1d(data)\n    x, _, _ = data_adapter.unpack_x_y_sample_weight(data)\n    return self(x, training=False)",
    "docstring": "The logic for one inference step. This method can be overridden to support custom inference logic. This method is called by . This method should contain the mathematical logic for one step of inference. This typically includes the forward pass. Configuration details for *how* this logic is run (e.g. and settings), should be left to , which can also be overridden. Args: data: A nested structure of s. Returns: The result of one inference step, typically the output of calling the on data.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py",
    "ast_data": "FunctionDef name:predict_step arg:self arg:data arguments arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_flatpages",
    "source_code": "@register.tag\ndef get_flatpages(parser, token):\n    bits = token.split_contents()\n    syntax_message = \"%(tag_name)s expects a syntax of %(tag_name)s ['url_starts_with'] [for user] as context_name\" % {'tag_name': bits[0]}\n    if 3 <= len(bits) <= 6:\n        if len(bits) % 2 == 0:\n            prefix = bits[1]\n        else:\n            prefix = None\n        if bits[-2] != 'as':\n            raise template.TemplateSyntaxError(syntax_message)\n        context_name = bits[-1]\n        if len(bits) >= 5:\n            if bits[-4] != 'for':\n                raise template.TemplateSyntaxError(syntax_message)\n            user = bits[-3]\n        else:\n            user = None\n        return FlatpageNode(context_name, starts_with=prefix, user=user)\n    else:\n        raise template.TemplateSyntaxError(syntax_message)",
    "docstring": "Retrieve all flatpage objects available for the current site and visible to the specific user (or visible to all users if no user is specified). Populate the template context with them in a variable whose name is defined by the ``, limits the returned flatpages to those beginning with a particular base URL. This argument can be a variable or a string, as it resolves from the template context. Syntax:: {% get_flatpages ['url_starts_with'] [for user] as context_name %} Example usage:: {% get_flatpages as flatpages %} {% get_flatpages for someuser as flatpages %} {% get_flatpages '/about/' as about_pages %} {% get_flatpages prefix as about_pages %} {% get_flatpages '/about/' for someuser as about_pages %}",
    "type": "function",
    "file_path": "django\\django\\contrib\\flatpages\\templatetags\\flatpages.py",
    "ast_data": "FunctionDef name:get_flatpages arg:parser arg:token arguments arg arg Assign Call Assign If Compare Call If Compare Call Assign Assign If Compare Raise Call Assign If Compare Call If Compare Raise Call Assign Assign Return return:yes Call Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_expand_labels",
    "source_code": "def _maybe_expand_labels(labels, predictions):\n    with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:\n        labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n        if isinstance(labels, sparse_tensor.SparseTensor):\n            return cond.cond(math_ops.equal(array_ops.rank(predictions), array_ops.size(labels.dense_shape) + 1), lambda: sparse_ops.sparse_reshape(labels, shape=array_ops.concat((labels.dense_shape, (1,)), 0), name=scope), lambda: labels)\n        labels_rank = labels.get_shape().ndims\n        if labels_rank is not None:\n            predictions_rank = predictions.get_shape().ndims\n            if predictions_rank is not None:\n                if predictions_rank == labels_rank:\n                    return labels\n                if predictions_rank == labels_rank + 1:\n                    return array_ops.expand_dims(labels, -1, name=scope)\n                raise ValueError(f'Unexpected labels shape {labels.get_shape()} for predictions shape {predictions.get_shape()}. Predictions rank should be the same rank as labels rank or labels rank plus one .')\n        return cond.cond(math_ops.equal(array_ops.rank(predictions), array_ops.rank(labels) + 1), lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)",
    "docstring": "If necessary, expand along last dimension to match . Args: labels: or with shape [D1, ... DN, num_labels] or [D1, ... DN]. The latter implies num_labels=1, in which case the result is an expanded with shape [D1, ... DN, 1]. predictions: with shape [D1, ... DN, num_classes]. Returns: with the same rank as . Raises: ValueError: if has invalid shape.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py",
    "ast_data": "FunctionDef name:_maybe_expand_labels arg:labels arg:predictions arguments arg arg With Call Assign Call If Call Return return:yes Call Call Call Call arguments Call Call arguments Assign Call If Compare Assign Call If Compare If Compare Return return:yes If Compare Return return:yes Call Raise Call Call Call Return return:yes Call Call Call Call arguments Call arguments"
  },
  {
    "library": "tensorflow",
    "name": "RandomUniform",
    "source_code": "class RandomUniform(Initializer):\n\n    def __init__(self, minval=-0.05, maxval=0.05, seed=None):\n        self.minval = minval\n        self.maxval = maxval\n        self.seed = seed\n        self._random_generator = _RandomGenerator(seed)\n\n    def __call__(self, shape, dtype=None, **kwargs):\n        _validate_kwargs(self.__class__.__name__, kwargs)\n        dtype = _get_dtype(dtype)\n        if not dtype.is_floating and (not dtype.is_integer):\n            raise ValueError('Expected float or integer dtype, got %s.' % dtype)\n        if _PARTITION_SHAPE in kwargs:\n            shape = kwargs[_PARTITION_SHAPE]\n        return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype)\n\n    def get_config(self):\n        return {'minval': self.minval, 'maxval': self.maxval, 'seed': self.seed}",
    "docstring": "Initializer that generates tensors with a uniform distribution. Also available via the shortcut function . Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.) >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate (inclusive). maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate (exclusive). seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py",
    "ast_data": "ClassDef name:RandomUniform FunctionDef name:__init__ arg:self arg:minval arg:maxval arg:seed arguments arg arg arg arg Assign Assign Assign Assign Call FunctionDef name:__call__ arg:self arg:shape arg:dtype arguments arg arg arg arg Call Assign Call If BoolOp Raise Call If Compare Assign Return return:yes Call FunctionDef name:get_config arg:self arguments arg Return return:yes"
  },
  {
    "library": "authlib",
    "name": "validate_contacts",
    "source_code": "def validate_contacts(self):\n    if 'contacts' in self and (not isinstance(self['contacts'], list)):\n        raise InvalidClaimError('contacts')",
    "docstring": "Array of strings representing ways to contact people responsible for this client, typically email addresses. The authorization server MAY make these contact addresses available to end-users for support requests for the client. See Section 6 for information on Privacy Considerations.",
    "type": "method",
    "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py",
    "ast_data": "FunctionDef name:validate_contacts arg:self arguments arg If BoolOp Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "state_dict",
    "source_code": "def state_dict(self, *args, destination=None, prefix='', keep_vars=False):\n    if len(args) > 0:\n        warnings.warn('Positional args are being deprecated, use kwargs instead. Refer to https://pytorch.org/docs/main/generated/torch.nn.Module.html#torch.nn.Module.state_dict for details.', FutureWarning, stacklevel=2)\n        if destination is None:\n            destination = args[0]\n        if len(args) > 1 and prefix == '':\n            prefix = args[1]\n        if len(args) > 2 and keep_vars is False:\n            keep_vars = args[2]\n    if destination is None:\n        destination = OrderedDict()\n        destination._metadata = OrderedDict()\n    local_metadata = dict(version=self._version)\n    if hasattr(destination, '_metadata'):\n        destination._metadata[prefix[:-1]] = local_metadata\n    for hook in self._state_dict_pre_hooks.values():\n        hook(self, prefix, keep_vars)\n    self._save_to_state_dict(destination, prefix, keep_vars)\n    for name, module in self._modules.items():\n        if module is not None:\n            module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)\n    for hook in self._state_dict_hooks.values():\n        hook_result = hook(self, destination, prefix, local_metadata)\n        if not getattr(hook, '_from_public_api', False):\n            if hook_result is not None:\n                destination = hook_result\n        elif hook_result is not None:\n            raise RuntimeError('state_dict post-hook must return None')\n    return destination",
    "docstring": "Return a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to `~torch.Tensor`. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> module.state_dict().keys() ['bias', 'weight']",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:state_dict arg:self arguments arg arg arg arg arg If Compare Call Call If Compare Assign If BoolOp Compare Call Compare Assign If BoolOp Compare Call Compare Assign If Compare Assign Call Assign Call Assign Call If Call Assign For Call Call Call For Call If Compare Call For Call Assign Call If Call If Compare Assign If Compare Raise Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "bin_path",
    "source_code": "@classmethod\ndef bin_path(cls):\n    return str(mpl.rcParams[cls._exec_key])",
    "docstring": "Return the binary path to the commandline tool used by a specific subclass. This is a class method so that the tool can be looked for before making a particular MovieWriter subclass available.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\animation.py",
    "ast_data": "FunctionDef name:bin_path arg:cls arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "ensure_scope",
    "source_code": "def ensure_scope(level: int, global_dict=None, local_dict=None, resolvers=(), target=None) -> Scope:\n    return Scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target)",
    "docstring": "Ensure that we are grabbing the correct scope.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\computation\\scope.py",
    "ast_data": "FunctionDef name:ensure_scope arg:level arg:global_dict arg:local_dict arg:resolvers arg:target arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "clone_metrics",
    "source_code": "def clone_metrics(metrics):\n    return nest.map_structure(clone_metric, metrics)",
    "docstring": "Clones the given metric list/dict.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py",
    "ast_data": "FunctionDef name:clone_metrics arg:metrics arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "set_subplotspec",
    "source_code": "def set_subplotspec(self, subplotspec):\n    self._subplotspec = subplotspec\n    self._set_position(subplotspec.get_position(self.get_figure(root=False)))",
    "docstring": "Set the . associated with the subplot.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:set_subplotspec arg:self arg:subplotspec arguments arg arg Assign Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "recover_original_precision_folded_computation_ops",
    "source_code": "def recover_original_precision_folded_computation_ops(gm):\n    graph = gm.graph\n    for target, idx in ((aten.convolution.default, (1, 2)), (aten.addmm.default, (0, 2)), (aten.mm.default, (1,))):\n        for node in graph.find_nodes(op='call_function', target=target):\n            orig_dtype = node.meta.get('_allow_mixed_dtype_folding', None)\n            if orig_dtype is None:\n                continue\n            with graph.inserting_before(node):\n                for i in idx:\n                    old_input = node.args[i]\n                    if old_input is None:\n                        continue\n                    new_input = graph.create_node('call_function', prims.convert_element_type.default, (old_input, orig_dtype))\n                    node.replace_input_with(old_input, new_input)",
    "docstring": "After binary folding conv/linear weights and biases to a higher dtype, recover the original precision they were in.",
    "type": "function",
    "file_path": "pytorch\\torch\\_inductor\\fx_passes\\binary_folding.py",
    "ast_data": "FunctionDef name:recover_original_precision_folded_computation_ops arg:gm arguments arg Assign For For Call Assign Call If Compare With Call For Assign If Compare Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "add_exit_callback_to_default_func_graph",
    "source_code": "def add_exit_callback_to_default_func_graph(fn) -> None:\n    default_graph = get_default_graph()\n    if not default_graph._building_function:\n        raise RuntimeError('Cannot add scope exit callbacks when not building a function.  Default graph: {}'.format(default_graph))\n    default_graph._add_scope_exit_callback(fn)",
    "docstring": "Add a callback to run when the default function graph goes out of scope. Usage: Args: fn: A callable that takes no arguments and whose output is ignored. To be executed when exiting func graph scope. Raises: RuntimeError: If executed when the current default graph is not a FuncGraph, or not currently executing in function creation mode (e.g., if inside an init_scope).",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:add_exit_callback_to_default_func_graph arg:fn arguments arg Assign Call If Raise Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_set_wise_labels",
    "source_code": "def _check_set_wise_labels(y_true, y_pred, average, labels, pos_label):\n    average_options = (None, 'micro', 'macro', 'weighted', 'samples')\n    if average not in average_options and average != 'binary':\n        raise ValueError('average has to be one of ' + str(average_options))\n    y_true, y_pred = attach_unique(y_true, y_pred)\n    y_type, y_true, y_pred = _check_targets(y_true, y_pred)\n    present_labels = _tolist(unique_labels(y_true, y_pred))\n    if average == 'binary':\n        if y_type == 'binary':\n            if pos_label not in present_labels:\n                if len(present_labels) >= 2:\n                    raise ValueError(f'pos_label={pos_label} is not a valid label. It should be one of {present_labels}')\n            labels = [pos_label]\n        else:\n            average_options = list(average_options)\n            if y_type == 'multiclass':\n                average_options.remove('samples')\n            raise ValueError(\"Target is %s but average='binary'. Please choose another average setting, one of %r.\" % (y_type, average_options))\n    elif pos_label not in (None, 1):\n        warnings.warn(\"Note that pos_label (set to %r) is ignored when average != 'binary' (got %r). You may use labels=[pos_label] to specify a single positive class.\" % (pos_label, average), UserWarning)\n    return labels",
    "docstring": "Validation associated with set-wise metrics. Returns identified labels.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\metrics\\_classification.py",
    "ast_data": "FunctionDef name:_check_set_wise_labels arg:y_true arg:y_pred arg:average arg:labels arg:pos_label arguments arg arg arg arg arg Assign If BoolOp Compare Compare Raise Call Call Assign Call Assign Call Assign Call Call If Compare If Compare If Compare If Compare Call Raise Call Assign Assign Call If Compare Call Raise Call If Compare Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict_proba",
    "source_code": "def predict_proba(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False)\n    _, log_resp = self._estimate_log_prob_resp(X)\n    return np.exp(log_resp)",
    "docstring": "Evaluate the components' density for each sample. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- resp : array, shape (n_samples, n_components) Density of each Gaussian component for each sample in X.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\mixture\\_base.py",
    "ast_data": "FunctionDef name:predict_proba arg:self arg:X arguments arg arg Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "get_table_list",
    "source_code": "def get_table_list(self, cursor):\n    cursor.execute(\"\\n            SELECT\\n                c.relname,\\n                CASE\\n                    WHEN c.relispartition THEN 'p'\\n                    WHEN c.relkind IN ('m', 'v') THEN 'v'\\n                    ELSE 't'\\n                END,\\n                obj_description(c.oid, 'pg_class')\\n            FROM pg_catalog.pg_class c\\n            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\\n            WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')\\n                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')\\n                AND pg_catalog.pg_table_is_visible(c.oid)\\n        \")\n    return [TableInfo(*row) for row in cursor.fetchall() if row[0] not in self.ignored_tables]",
    "docstring": "Return a list of table and view names in the current database.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\postgresql\\introspection.py",
    "ast_data": "FunctionDef name:get_table_list arg:self arg:cursor arguments arg arg Call Return return:yes Call Call Compare"
  },
  {
    "library": "scikit-learn",
    "name": "sort",
    "source_code": "def sort(x: Array, /, *, axis: int=-1, descending: py_bool=False, stable: py_bool=True) -> Array:\n    x, restore = _ensure_single_chunk(x, axis)\n    meta_xp = array_namespace(x._meta)\n    x = da.map_blocks(meta_xp.sort, x, axis=axis, meta=x._meta, dtype=x.dtype, descending=descending, stable=stable)\n    return restore(x)",
    "docstring": "Array API compatibility layer around the lack of sort() in Dask. Warnings -------- This function temporarily rechunks the array along to a single chunk. This can be extremely inefficient and can lead to out-of-memory errors. See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py",
    "ast_data": "FunctionDef name:sort arguments arg arg arg arg Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "cryptography",
    "name": "load_public",
    "source_code": "def load_public(self, data: memoryview) -> tuple[ed25519.Ed25519PublicKey, memoryview]:\n    public_key, data = _lookup_kformat(_SSH_ED25519).load_public(data)\n    _, data = load_application(data)\n    return (public_key, data)",
    "docstring": "Make Ed25519 public key from data.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:load_public arg:self arg:data arguments arg arg Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    check_is_fitted(self)\n    X = validate_data(self, X, reset=False, accept_sparse='csr')\n    if not hasattr(self, 'cluster_centers_'):\n        raise ValueError(\"Predict method is not supported when affinity='precomputed'.\")\n    if self.cluster_centers_.shape[0] > 0:\n        with config_context(assume_finite=True):\n            return pairwise_distances_argmin(X, self.cluster_centers_)\n    else:\n        warnings.warn(\"This model does not have any cluster centers because affinity propagation did not converge. Labeling every sample as '-1'.\", ConvergenceWarning)\n        return np.array([-1] * X.shape[0])",
    "docstring": "Predict the closest cluster each sample in X belongs to. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to predict. If a sparse matrix is provided, it will be converted into a sparse ``. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_affinity_propagation.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Call Assign Call If Call Raise Call If Compare With Call Return return:yes Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "types",
    "source_code": "def types(self):\n    attr_types = [self._attributes[name].type_name for name in self._attributes]\n    return attr_types",
    "docstring": "Return the list of attribute types. Returns ------- attr_types : list of str The attribute types.",
    "type": "method",
    "file_path": "scipy\\scipy\\io\\arff\\_arffread.py",
    "ast_data": "FunctionDef name:types arg:self arguments arg Assign Return return:yes"
  },
  {
    "library": "pygame",
    "name": "note_off",
    "source_code": "def note_off(self, note, velocity=0, channel=0):\n    if not 0 <= channel <= 15:\n        raise ValueError('Channel not between 0 and 15.')\n    self.write_short(128 + channel, note, velocity)",
    "docstring": "turns a midi note off. Note must be on. Output.note_off(note, velocity=0, channel=0) note is an integer from 0 to 127 velocity is an integer from 0 to 127 (release velocity) channel is an integer from 0 to 15 Turn a note off in the output stream. The note must already be on for this to work correctly.",
    "type": "method",
    "file_path": "pygame\\src_py\\midi.py",
    "ast_data": "FunctionDef name:note_off arg:self arg:note arg:velocity arg:channel arguments arg arg arg arg If Compare Raise Call Call"
  },
  {
    "library": "kornia",
    "name": "__init__",
    "source_code": "def __init__(self, custom_config: Optional[AdalamConfig]=None) -> None:\n    if custom_config is not None:\n        self.config = custom_config\n    else:\n        self.config = get_adalam_default_config()",
    "docstring": "Wrap the method AdaLAM for outlier filtering. init args: custom_config: dictionary overriding the default configuration. Missing parameters are kept as default. See documentation of DEFAULT_CONFIG for specific explanations on the accepted parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\feature\\adalam\\adalam.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:custom_config arguments arg arg If Compare Assign Assign Call"
  },
  {
    "library": "pandas",
    "name": "maybe_prepare_scalar_for_op",
    "source_code": "def maybe_prepare_scalar_for_op(obj, shape: Shape):\n    if type(obj) is datetime.timedelta:\n        return Timedelta(obj)\n    elif type(obj) is datetime.datetime:\n        return Timestamp(obj)\n    elif isinstance(obj, np.datetime64):\n        if isna(obj):\n            from pandas.core.arrays import DatetimeArray\n            if is_unitless(obj.dtype):\n                obj = obj.astype('datetime64[ns]')\n            elif not is_supported_dtype(obj.dtype):\n                new_dtype = get_supported_dtype(obj.dtype)\n                obj = obj.astype(new_dtype)\n            right = np.broadcast_to(obj, shape)\n            return DatetimeArray._simple_new(right, dtype=right.dtype)\n        return Timestamp(obj)\n    elif isinstance(obj, np.timedelta64):\n        if isna(obj):\n            from pandas.core.arrays import TimedeltaArray\n            if is_unitless(obj.dtype):\n                obj = obj.astype('timedelta64[ns]')\n            elif not is_supported_dtype(obj.dtype):\n                new_dtype = get_supported_dtype(obj.dtype)\n                obj = obj.astype(new_dtype)\n            right = np.broadcast_to(obj, shape)\n            return TimedeltaArray._simple_new(right, dtype=right.dtype)\n        return Timedelta(obj)\n    elif isinstance(obj, np.integer):\n        return int(obj)\n    elif isinstance(obj, np.floating):\n        return float(obj)\n    return obj",
    "docstring": "Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object shape : tuple[int] Returns ------- out : object Notes ----- Be careful to call this *after* determining the attribute to be attached to the result of the arithmetic operation.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:maybe_prepare_scalar_for_op arg:obj arg:shape arguments arg arg If Compare Call Return return:yes Call If Compare Call Return return:yes Call If Call If Call If Call Assign Call If Call Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call If Call If Call If Call Assign Call If Call Assign Call Assign Call Assign Call Return return:yes Call Return return:yes Call If Call Return return:yes Call If Call Return return:yes Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "merge_subcluster",
    "source_code": "def merge_subcluster(self, nominee_cluster, threshold):\n    new_ss = self.squared_sum_ + nominee_cluster.squared_sum_\n    new_ls = self.linear_sum_ + nominee_cluster.linear_sum_\n    new_n = self.n_samples_ + nominee_cluster.n_samples_\n    new_centroid = 1 / new_n * new_ls\n    new_sq_norm = np.dot(new_centroid, new_centroid)\n    sq_radius = new_ss / new_n - new_sq_norm\n    if sq_radius <= threshold ** 2:\n        self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_ = (new_n, new_ls, new_ss, new_centroid, new_sq_norm)\n        return True\n    return False",
    "docstring": "Check if a cluster is worthy enough to be merged. If yes then merge.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\cluster\\_birch.py",
    "ast_data": "FunctionDef name:merge_subcluster arg:self arg:nominee_cluster arg:threshold arguments arg arg arg Assign Assign Assign Assign Assign Call Assign If Compare Assign Return return:yes Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "dtype",
    "source_code": "@property\ndef dtype(self):\n    return self._type_spec._dtype",
    "docstring": "Returns the symbolically inferred for this Keras output.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py",
    "ast_data": "FunctionDef name:dtype arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "decision_function",
    "source_code": "@available_if(_estimator_has('decision_function', delegates=('estimators_', 'estimator')))\ndef decision_function(self, X, **params):\n    _raise_for_params(params, self, 'decision_function')\n    check_is_fitted(self)\n    X = validate_data(self, X, accept_sparse=['csr', 'csc'], dtype=None, ensure_all_finite=False, reset=False)\n    if _routing_enabled():\n        routed_params = process_routing(self, 'decision_function', **params)\n    else:\n        routed_params = Bunch()\n        routed_params.estimator = Bunch(decision_function=Bunch())\n    n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)\n    all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)((delayed(_parallel_decision_function)(self.estimators_[starts[i]:starts[i + 1]], self.estimators_features_[starts[i]:starts[i + 1]], X, params=routed_params.estimator.decision_function) for i in range(n_jobs)))\n    decisions = sum(all_decisions) / self.n_estimators\n    return decisions",
    "docstring": "Average of the decision functions of the base classifiers. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. **params : dict Parameters routed to the method of the sub-estimators via the metadata routing API. .. versionadded:: 1.7 Only available if is set. See :ref: for more details. Returns ------- score : ndarray of shape (n_samples, k) The decision function of the input samples. The columns correspond to the classes in sorted order, as they appear in the attribute ``.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\ensemble\\_bagging.py",
    "ast_data": "FunctionDef name:decision_function arg:self arg:X arguments arg arg arg Call Call Assign Call If Call Assign Call Assign Call Assign Call Call Assign Call Assign Call Call Call Call Call Assign Call Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y=None):\n    X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2)\n    if self.covariance == 'precomputed':\n        emp_cov = X.copy()\n        self.location_ = np.zeros(X.shape[1])\n    else:\n        emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)\n        if self.assume_centered:\n            self.location_ = np.zeros(X.shape[1])\n        else:\n            self.location_ = X.mean(0)\n    self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(emp_cov, alpha=self.alpha, cov_init=None, mode=self.mode, tol=self.tol, enet_tol=self.enet_tol, max_iter=self.max_iter, verbose=self.verbose, eps=self.eps)\n    return self",
    "docstring": "Fit the GraphicalLasso model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the covariance estimate. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\covariance\\_graph_lasso.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Assign Call If Compare Assign Call Assign Call Assign Call If Assign Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "start_single_process",
    "source_code": "def start_single_process(self, task_type, task_id, cluster_spec=None, fn=None, args=None, kwargs=None):\n    with self._process_lock:\n        if self._joined:\n            raise ValueError('cannot start new processes afterMultiProcessRunner.join() is called')\n        self._start_subprocess_and_reading_thread(task_type, task_id, cluster_spec=cluster_spec, fn=fn, args=args or (), kwargs=kwargs or {})",
    "docstring": "Starts a single process. This starts a process in the cluster with the task type, task id, and the process function (). If process function is , the function provided at will be used. If is , the cluster spec provided at will be used. TODO(rchao): It is meant that all subprocesses will be updated with the new cluster spec, but this has yet to be implemented. At this time only the newly started subprocess picks up this updated cluster spec. Args: task_type: The task type. task_id: The task id. cluster_spec: The cluster spec to be used on the newly started process. If , the cluster spec provided at will be used. fn: The process function to be run on the newly started process. If specified, specify and as well. If , the function provided at will be used. args: Optional positional arguments to be supplied in . kwargs: Optional keyword arguments to be supplied in .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_process_runner.py",
    "ast_data": "FunctionDef name:start_single_process arg:self arg:task_type arg:task_id arg:cluster_spec arg:fn arg:args arg:kwargs arguments arg arg arg arg arg arg arg With If Raise Call Call BoolOp BoolOp"
  },
  {
    "library": "tensorflow",
    "name": "_logical_and",
    "source_code": "def _logical_and(*args):\n    args_ = [_static_value(x) for x in args]\n    if any((x is not None and (not bool(x)) for x in args_)):\n        return constant_op.constant(False)\n    if all((x is not None and bool(x) for x in args_)):\n        return constant_op.constant(True)\n    if len(args) == 2:\n        return math_ops.logical_and(*args)\n    return math_ops.reduce_all(args)",
    "docstring": "Convenience function which attempts to statically .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\transformed_distribution.py",
    "ast_data": "FunctionDef name:_logical_and arguments arg Assign Call If Call BoolOp Compare Call Return return:yes Call If Call BoolOp Compare Call Return return:yes Call If Compare Call Return return:yes Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "_check_prepopulated_fields_value",
    "source_code": "def _check_prepopulated_fields_value(self, obj, val, label):\n    if not isinstance(val, (list, tuple)):\n        return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')\n    else:\n        return list(chain.from_iterable((self._check_prepopulated_fields_value_item(obj, subfield_name, '%s[%r]' % (label, index)) for index, subfield_name in enumerate(val))))",
    "docstring": "Check a value of dictionary, i.e. it's an iterable of existing fields.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\checks.py",
    "ast_data": "FunctionDef name:_check_prepopulated_fields_value arg:self arg:obj arg:val arg:label arguments arg arg arg arg If Call Return return:yes Call Return return:yes Call Call Call Call"
  },
  {
    "library": "scipy",
    "name": "_onenormest_matrix_power",
    "source_code": "def _onenormest_matrix_power(A, p, t=2, itmax=5, compute_v=False, compute_w=False):\n    from scipy.sparse.linalg._onenormest import onenormest\n    return onenormest(aslinearoperator(A) ** p)",
    "docstring": "Efficiently estimate the 1-norm of A^p. Parameters ---------- A : ndarray Matrix whose 1-norm of a power is to be computed. p : int Non-negative integer power. t : int, optional A positive parameter controlling the tradeoff between accuracy versus time and memory usage. Larger values take longer and use more memory but give more accurate output. itmax : int, optional Use at most this many iterations. compute_v : bool, optional Request a norm-maximizing linear operator input vector if True. compute_w : bool, optional Request a norm-maximizing linear operator output vector if True. Returns ------- est : float An underestimate of the 1-norm of the sparse matrix. v : ndarray, optional The vector such that ||Av||_1 == est*||v||_1. It can be thought of as an input to the linear operator that gives an output with particularly large norm. w : ndarray, optional The vector Av which has relatively large 1-norm. It can be thought of as an output of the linear operator that is relatively large in norm compared to the input.",
    "type": "function",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_expm_multiply.py",
    "ast_data": "FunctionDef name:_onenormest_matrix_power arg:A arg:p arg:t arg:itmax arg:compute_v arg:compute_w arguments arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "seaborn",
    "name": "update_units",
    "source_code": "def update_units(self, x):\n    self.converter = mpl.units.registry.get_converter(x)\n    if self.converter is not None:\n        self.converter.default_units(x, self)\n        info = self.converter.axisinfo(self.units, self)\n        if info is None:\n            return\n        if info.majloc is not None:\n            self.set_major_locator(info.majloc)\n        if info.majfmt is not None:\n            self.set_major_formatter(info.majfmt)",
    "docstring": "Pass units to the internal converter, potentially updating its mapping.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\scales.py",
    "ast_data": "FunctionDef name:update_units arg:self arg:x arguments arg arg Assign Call If Compare Call Assign Call If Compare Return return:no If Compare Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "fuse",
    "source_code": "def fuse(model: torch.nn.Module, inplace=False, no_trace=False) -> torch.nn.Module:\n    patterns = [(nn.Conv1d, nn.BatchNorm1d), (nn.Conv2d, nn.BatchNorm2d), (nn.Conv3d, nn.BatchNorm3d), (nn.Linear, nn.BatchNorm1d)]\n    if not inplace:\n        model = copy.deepcopy(model)\n    if not no_trace or not isinstance(model, torch.fx.GraphModule):\n        fx_model = fx.symbolic_trace(model)\n    else:\n        fx_model = model\n    modules = dict(fx_model.named_modules())\n    new_graph = copy.deepcopy(fx_model.graph)\n    for pattern in patterns:\n        for node in new_graph.nodes:\n            if matches_module_pattern(pattern, node, modules):\n                if len(node.args[0].users) > 1:\n                    continue\n                first_layer = modules[node.args[0].target]\n                bn = modules[node.target]\n                if not bn.track_running_stats:\n                    continue\n                if pattern[0] in [nn.Conv1d, nn.Conv2d, nn.Conv3d]:\n                    fused_layer = fuse_conv_bn_eval(first_layer, bn)\n                else:\n                    fused_layer = fuse_linear_bn_eval(first_layer, bn)\n                replace_node_module(node.args[0], modules, fused_layer)\n                node.replace_all_uses_with(node.args[0])\n                new_graph.erase_node(node)\n    return fx.GraphModule(fx_model, new_graph)",
    "docstring": "Fuses convolution/BN and linear/BN layers for inference purposes. Will deepcopy your model by default, but can modify the model inplace as well.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py",
    "ast_data": "FunctionDef name:fuse arg:model arg:inplace arg:no_trace arguments arg arg arg Assign If Assign Call If BoolOp Call Assign Call Assign Assign Call Call Assign Call For For If Call If Compare Call Assign Assign If If Compare Assign Call Assign Call Call Call Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_call_if_callable",
    "source_code": "def _call_if_callable(self, param):\n    return param() if callable(param) else param",
    "docstring": "Call the function if param is callable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\training\\optimizer.py",
    "ast_data": "FunctionDef name:_call_if_callable arg:self arg:param arguments arg arg Return return:yes Call Call"
  },
  {
    "library": "pytorch",
    "name": "measure_forward_pass",
    "source_code": "def measure_forward_pass(sparse_model_metadata, device, sparse_dlrm, **batch):\n    time_taken_dict: dict[str, list] = {'norm': [], 'sparse_block_shape': [], 'sparsity_level': [], 'time_taken': []}\n    metadata = pd.read_csv(sparse_model_metadata)\n    for _, row in metadata.iterrows():\n        norm, sbs, sl = (row['norm'], row['sparse_block_shape'], row['sparsity_level'])\n        model_path = row['path']\n        model = fetch_model(model_path, device, sparse_dlrm=sparse_dlrm)\n        time_taken = run_forward(model, **batch)\n        out_str = f'{norm}_{sbs}_{sl}={time_taken}'\n        print(out_str)\n        time_taken_dict['norm'].append(norm)\n        time_taken_dict['sparse_block_shape'].append(sbs)\n        time_taken_dict['sparsity_level'].append(sl)\n        time_taken_dict['time_taken'].append(time_taken)\n    time_df = pd.DataFrame(time_taken_dict)\n    if sparse_dlrm:\n        time_df['dlrm_type'] = 'with_torch_sparse'\n    else:\n        time_df['dlrm_type'] = 'without_torch_sparse'\n    return time_df",
    "docstring": "Measures and tracks the forward pass of the model for all the sparsity levels, block shapes and norms available in sparse_model_metadata file. If sparse_dlrm=True, then the SparseDLRM model is loaded, otherwise the standard one is.",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\evaluate_forward_time.py",
    "ast_data": "FunctionDef name:measure_forward_pass arg:sparse_model_metadata arg:device arg:sparse_dlrm arguments arg arg arg arg Assign Call For Call Assign Assign Assign Call Assign Call Assign Call Call Call Call Call Assign Call If Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, func, Tout, is_grad_func):\n    self._func = func\n    self._out_dtypes = Tout\n    self._is_grad_func = is_grad_func\n    self._support_graph_mode_gradient = False",
    "docstring": "Constructs an EagerFunc. Args: func: The function to wrap. Tout: A list of datatypes for the output; an empty list if the output is None. is_grad_func: Whether this EagerFunc is the gradient of another EagerPyFunc.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:func arg:Tout arg:is_grad_func arguments arg arg arg arg Assign Assign Assign Assign"
  },
  {
    "library": "pytorch",
    "name": "reconstruct_outputs",
    "source_code": "def reconstruct_outputs(self) -> OutputType:\n    if not self.cached_tensor_outputs:\n        self._initialize_cached_tensors()\n    outputs: OutputType = []\n    for i, (storage_info, metadata) in enumerate(zip(self.output_storage_alias, self.outputs_metadata)):\n        if not isinstance(metadata, dict):\n            assert isinstance(metadata, (int, type(None)))\n            outputs.append(metadata)\n            continue\n        cached_t = self.cached_tensor_outputs[i]\n        if cached_t is not None:\n            if cached_t._backward_hooks is not None:\n                cached_t._backward_hooks = None\n            outputs.append(cached_t)\n            continue\n        static_t = self.static_output_tensors[i]\n        if static_t is not None:\n            assert self.outputs_weakrefs[i] is None\n            outputs.append(static_t)\n            continue\n        storage = self.prepare_alias_info_for_tensor_construction(storage_info, metadata)\n        if isinstance(storage, UntypedStorage) or storage is None:\n            out = self._reconstruct_from_tensor_metadata(metadata, storage)\n        else:\n            assert isinstance(storage, int)\n            out = self._reconstruct_from_tensor_metadata(metadata, cast(torch.Tensor, outputs[storage]).untyped_storage())\n        outputs.append(out)\n        w = self.outputs_weakrefs[i]\n        assert w is not None\n        w.swap_weakref(out.untyped_storage()._weak_ref())\n    return outputs",
    "docstring": "Reconstruct output tensors according to their saved metadata and alias information",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py",
    "ast_data": "FunctionDef name:reconstruct_outputs arg:self arguments arg If Call For Call Call If Call Call Call Call Assign If Compare If Compare Assign Call Assign If Compare Compare Call Assign Call If BoolOp Call Compare Assign Call Call Assign Call Call Call Call Assign Compare Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "add_subpackage",
    "source_code": "def add_subpackage(self, subpackage_name, subpackage_path=None, standalone=False):\n    if standalone:\n        parent_name = None\n    else:\n        parent_name = self.name\n    config_list = self.get_subpackage(subpackage_name, subpackage_path, parent_name=parent_name, caller_level=2)\n    if not config_list:\n        self.warn('No configuration returned, assuming unavailable.')\n    for config in config_list:\n        d = config\n        if isinstance(config, Configuration):\n            d = config.todict()\n        assert isinstance(d, dict), repr(type(d))\n        self.info('Appending %s configuration to %s' % (d.get('name'), self.name))\n        self.dict_append(**d)\n    dist = self.get_distribution()\n    if dist is not None:\n        self.warn('distutils distribution has been initialized, it may be too late to add a subpackage ' + subpackage_name)",
    "docstring": "Add a sub-package to the current Configuration instance. This is useful in a setup.py script for adding sub-packages to a package. Parameters ---------- subpackage_name : str name of the subpackage subpackage_path : str if given, the subpackage path such as the subpackage is in subpackage_path / subpackage_name. If None,the subpackage is assumed to be located in the local path / subpackage_name. standalone : bool",
    "type": "method",
    "file_path": "numpy\\numpy\\distutils\\misc_util.py",
    "ast_data": "FunctionDef name:add_subpackage arg:self arg:subpackage_name arg:subpackage_path arg:standalone arguments arg arg arg arg If Assign Assign Assign Call If Call For Assign If Call Assign Call Call Call Call Call Call Call Assign Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "eventTreeTraversal",
    "source_code": "def eventTreeTraversal(self):\n    yield from traverse_dfs(self.event_tree)",
    "docstring": "Traverse the event tree and yield all events. Override this method in subclass to customize the traversal.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py",
    "ast_data": "FunctionDef name:eventTreeTraversal arg:self arguments arg Call"
  },
  {
    "library": "django",
    "name": "errors",
    "source_code": "@property\ndef errors(self):\n    return self.form.errors.get(self.name, self.form.error_class(renderer=self.form.renderer))",
    "docstring": "Return an ErrorList (empty if there are no errors) for this field.",
    "type": "method",
    "file_path": "django\\django\\forms\\boundfield.py",
    "ast_data": "FunctionDef name:errors arg:self arguments arg Return return:yes Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_get_first_singular_vectors_svd",
    "source_code": "def _get_first_singular_vectors_svd(X, y):\n    C = np.dot(X.T, y)\n    U, _, Vt = svd(C, full_matrices=False)\n    return (U[:, 0], Vt[0, :])",
    "docstring": "Return the first left and right singular vectors of X'y. Here the whole SVD is computed.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\cross_decomposition\\_pls.py",
    "ast_data": "FunctionDef name:_get_first_singular_vectors_svd arg:X arg:y arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "define_buffer",
    "source_code": "def define_buffer(self, name, sizes: list[Any], dtype=torch.float) -> str:\n    sizes = parse_expr_with_index_symbols(sizes)\n    buf = ir.Buffer(name=name, layout=ir.FixedLayout(torch.device('cpu'), dtype, sizes))\n    self.local_buffers[name] = buf\n    ctype = f'{DTYPE_TO_CPP[dtype]}'\n    numel = f'{cexpr_index(buf.get_numel())}'\n    return f'auto _{name} = std::make_unique<{ctype}[]>({numel}); auto {name} = _{name}.get();'",
    "docstring": "Define kernel local buffer",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_template_kernel.py",
    "ast_data": "FunctionDef name:define_buffer arg:self arg:name arg:sizes arg:dtype arguments arg arg arg arg Assign Call Assign Call Call Call Assign Assign Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_ungroup_and_make_mirrored",
    "source_code": "def _ungroup_and_make_mirrored(grouped_reduced, destinations, reduce_op, num_between_graph_workers=1):\n    num_replicas = len(get_devices_from(destinations)) * num_between_graph_workers\n    index = [[] for _ in range(len(grouped_reduced[0]))]\n    for per_replica_reduced in grouped_reduced:\n        for i, (v, _) in enumerate(per_replica_reduced):\n            if reduce_op == reduce_util.ReduceOp.MEAN:\n                with ops.device(v.device):\n                    index[i].append(v / num_replicas)\n            else:\n                index[i].append(v)\n    return [distribute_utils.regroup(v, wrap_class=value_lib.Mirrored) for v in index]",
    "docstring": "Ungroup results from all-reduce and make Mirrored objects. Each all-reduce result will be divided by the number of destinations before Mirrored objects are created if reduce_op is \"mean\". Args: grouped_reduced: a list of lists, each sublist has components for each device, paired with a None. It is the result from cross_device_utils.aggregate_gradients_using*. destinations: a value to colocate the result with. reduce_op: Indicates how values will be aggregated. Accepted values are , . num_between_graph_workers: number of workers in the between-graph replication. Returns: a list of Mirrored objects.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py",
    "ast_data": "FunctionDef name:_ungroup_and_make_mirrored arg:grouped_reduced arg:destinations arg:reduce_op arg:num_between_graph_workers arguments arg arg arg arg Assign Call Call Assign Call Call For For Call If Compare With Call Call Call Return return:yes Call"
  },
  {
    "library": "kornia",
    "name": "make_samplers",
    "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None:\n    gain = _range_bound(self.gain, 'gain').to(device, dtype)\n    self.gain_sampler = UniformDistribution(gain[0], gain[1], validate_args=False)\n    sign = _range_bound(self.sign, 'sign', bounds=(-1.0, 1.0), center=0.0).to(device, dtype)\n    self.sign_sampler = UniformDistribution(sign[0], sign[1], validate_args=False)\n    self.directions_sampler = UniformDistribution(0, 4, validate_args=False)",
    "docstring": "Create samplers for generating random gaussian illumination parameters.",
    "type": "method",
    "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py",
    "ast_data": "FunctionDef name:make_samplers arg:self arg:device arg:dtype arguments arg arg arg Assign Call Call Assign Call Assign Call Call Assign Call Assign Call"
  },
  {
    "library": "seaborn",
    "name": "Shift",
    "source_code": "@dataclass\nclass Shift(Move):\n    x: float = 0\n    y: float = 0\n\n    def __call__(self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale]) -> DataFrame:\n        data = data.copy(deep=False)\n        data['x'] = data['x'] + self.x\n        data['y'] = data['y'] + self.y\n        return data",
    "docstring": "Displacement of all marks with the same magnitude / direction. Parameters ---------- x, y : float Magnitude of shift, in data units, along each axis. Examples -------- .. include:: ../docstrings/objects.Shift.rst",
    "type": "class",
    "file_path": "seaborn\\seaborn\\_core\\moves.py",
    "ast_data": "ClassDef name:Shift FunctionDef name:__call__ arg:self arg:data arg:groupby arg:orient arg:scales arguments arg arg arg arg arg Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "gen_ops",
    "source_code": "def gen_ops(self) -> 'list[tuple[str, cutlass_gemm_op.GemmOperation]]':\n    assert cutlass_utils.try_import_cutlass()\n    import cutlass_library.gemm_operation as cutlass_gemm_op\n    import cutlass_library.library as cutlass_lib\n    if self.cache_key in self.filtered_ops_cache:\n        log.debug('Using cached ops for %s', self.cache_key)\n        return self.filtered_ops_cache[self.cache_key]\n    ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm]\n    res: dict[str, cutlass_gemm_op.GemmOperation] = {}\n    start_time = time.time()\n    for op_dict in ops.values():\n        for op_list in op_dict.values():\n            for op in op_list:\n                assert isinstance(op, cutlass_gemm_op.GemmOperation)\n                filter_res = self.filter_op(op)\n                if filter_res is not None and res.get(filter_res.configuration_name(), None) is None:\n                    res[filter_res.configuration_name()] = filter_res\n    log.info('Got cutlass configs: total number of ops: %d. Filtering took %.2f seconds', len(res), time.time() - start_time)\n    sorted_res = sorted(res.items())\n    ret_res = sorted_res[:inductor_cuda_config.cutlass_max_profiling_configs]\n    if len(self.filtered_ops_cache) < 50:\n        self.filtered_ops_cache[self.cache_key] = ret_res\n    else:\n        log.debug('Not caching ops since filtered_ops_cache has reached size 50.')\n    return ret_res",
    "docstring": "Creates a list of Cutlass GemmOperation instances that match the operation this template is designed to represent. The matching is carried out with respect to the input and output specifications of the operation. No function arguments. Returns: List[Tuple[str, cutlass_gemm_op.GemmOperation]]: A list of (cutlass_name, GemmOperation) tuples that are compatible with the operation requirements of this template.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py",
    "ast_data": "FunctionDef name:gen_ops arg:self arguments arg Call If Compare Call Return return:yes Assign Call Assign Call For Call For Call For Call Assign Call If BoolOp Compare Compare Call Call Assign Call Call Call Call Assign Call Call Assign If Compare Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "render",
    "source_code": "def render(self, name, value, attrs=None, renderer=None):\n    context = self.get_context(name, value, attrs)\n    return self._render(self.template_name, context, renderer)",
    "docstring": "Render the widget as an HTML string.",
    "type": "method",
    "file_path": "django\\django\\forms\\widgets.py",
    "ast_data": "FunctionDef name:render arg:self arg:name arg:value arg:attrs arg:renderer arguments arg arg arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "clear_collection",
    "source_code": "def clear_collection(self, name) -> None:\n    self._check_not_finalized()\n    with self._lock:\n        if name in self._collections:\n            del self._collections[name]",
    "docstring": "Clears all values in a collection. Args: name: The key for the collection. The class contains many standard names for collections.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py",
    "ast_data": "FunctionDef name:clear_collection arg:self arg:name arguments arg arg Call With If Compare"
  },
  {
    "library": "matplotlib",
    "name": "_process_args",
    "source_code": "def _process_args(self, *args, **kwargs):\n    self.levels = args[0]\n    allsegs = args[1]\n    allkinds = args[2] if len(args) > 2 else None\n    self.zmax = np.max(self.levels)\n    self.zmin = np.min(self.levels)\n    if allkinds is None:\n        allkinds = [[None] * len(segs) for segs in allsegs]\n    if self.filled:\n        if len(allsegs) != len(self.levels) - 1:\n            raise ValueError('must be one less number of segments as levels')\n    elif len(allsegs) != len(self.levels):\n        raise ValueError('must be same number of segments as levels')\n    if len(allkinds) != len(allsegs):\n        raise ValueError('allkinds has different length to allsegs')\n    flatseglist = [s for seg in allsegs for s in seg]\n    points = np.concatenate(flatseglist, axis=0)\n    self._mins = points.min(axis=0)\n    self._maxs = points.max(axis=0)\n    self._paths = [Path.make_compound_path(*map(Path, segs, kinds)) for segs, kinds in zip(allsegs, allkinds)]\n    return kwargs",
    "docstring": "Process *args* and *kwargs*; override in derived classes. Must set self.levels, self.zmin and self.zmax, and update Axes limits.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\contour.py",
    "ast_data": "FunctionDef name:_process_args arg:self arguments arg arg arg Assign Assign Assign Compare Call Assign Call Assign Call If Compare Assign Call If If Compare Call Call Raise Call If Compare Call Call Raise Call If Compare Call Call Raise Call Assign Assign Call Assign Call Assign Call Assign Call Call Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "bincount",
    "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)\ndef bincount(x, weights=None, minlength=None):\n    return (x, weights)",
    "docstring": "bincount(x, /, weights=None, minlength=0) Count number of occurrences of each value in array of non-negative ints. The number of bins (of size 1) is one larger than the largest value in . If is specified, there will be at least this number of bins in the output array (though it will be longer if necessary, depending on the contents of ). Each bin gives the number of occurrences of its index value in . If is specified the input array is weighted by it, i.e. if a value `xoutminlength` keyword. >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights >>> x = np.array([0, 1, 1, 2, 2, 2]) >>> np.bincount(x, weights=w) array([ 0.3, 0.7, 1.1])",
    "type": "function",
    "file_path": "numpy\\numpy\\_core\\multiarray.py",
    "ast_data": "FunctionDef name:bincount arg:x arg:weights arg:minlength arguments arg arg arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_build_names_mapper",
    "source_code": "def _build_names_mapper(rownames: list[str], colnames: list[str]) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]:\n    dup_names = set(rownames) | set(colnames)\n    rownames_mapper = {f'row_{i}': name for i, name in enumerate(rownames) if name in dup_names}\n    unique_rownames = [f'row_{i}' if name in dup_names else name for i, name in enumerate(rownames)]\n    colnames_mapper = {f'col_{i}': name for i, name in enumerate(colnames) if name in dup_names}\n    unique_colnames = [f'col_{i}' if name in dup_names else name for i, name in enumerate(colnames)]\n    return (rownames_mapper, unique_rownames, colnames_mapper, unique_colnames)",
    "docstring": "Given the names of a DataFrame's rows and columns, returns a set of unique row and column names and mappers that convert to original names. A row or column name is replaced if it is duplicate among the rows of the inputs, among the columns of the inputs or between the rows and the columns. Parameters ---------- rownames: list[str] colnames: list[str] Returns ------- Tuple(Dict[str, str], List[str], Dict[str, str], List[str]) rownames_mapper: dict[str, str] a dictionary with new row names as keys and original rownames as values unique_rownames: list[str] a list of rownames with duplicate names replaced by dummy names colnames_mapper: dict[str, str] a dictionary with new column names as keys and original column names as values unique_colnames: list[str] a list of column names with duplicate names replaced by dummy names",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\reshape\\pivot.py",
    "ast_data": "FunctionDef name:_build_names_mapper arg:rownames arg:colnames arguments arg arg Assign Call Call Assign Call Compare Assign Compare Call Assign Call Compare Assign Compare Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "HandlerNpointsYoffsets",
    "source_code": "class HandlerNpointsYoffsets(HandlerNpoints):\n\n    def __init__(self, numpoints=None, yoffsets=None, **kwargs):\n        super().__init__(numpoints=numpoints, **kwargs)\n        self._yoffsets = yoffsets\n\n    def get_ydata(self, legend, xdescent, ydescent, width, height, fontsize):\n        if self._yoffsets is None:\n            ydata = height * legend._scatteryoffsets\n        else:\n            ydata = height * np.asarray(self._yoffsets)\n        return ydata",
    "docstring": "A legend handler that shows *numpoints* in the legend, and allows them to be individually offset in the y-direction.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py",
    "ast_data": "ClassDef name:HandlerNpointsYoffsets FunctionDef name:__init__ arg:self arg:numpoints arg:yoffsets arguments arg arg arg arg Call Call Assign FunctionDef name:get_ydata arg:self arg:legend arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arguments arg arg arg arg arg arg arg If Compare Assign Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_RendezvousStateHolder",
    "source_code": "class _RendezvousStateHolder(ABC):\n\n    @property\n    @abstractmethod\n    def state(self) -> _RendezvousState:\n        pass\n\n    @abstractmethod\n    def sync(self) -> Optional[bool]:\n        pass\n\n    @abstractmethod\n    def mark_dirty(self) -> None:\n        pass",
    "docstring": "Hold the shared rendezvous state synced with other nodes.",
    "type": "class",
    "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py",
    "ast_data": "ClassDef name:_RendezvousStateHolder FunctionDef name:state arg:self arguments arg FunctionDef name:sync arg:self arguments arg FunctionDef name:mark_dirty arg:self arguments arg"
  },
  {
    "library": "scikit-learn",
    "name": "check_non_negative",
    "source_code": "def check_non_negative(X, whom):\n    xp, _ = get_namespace(X)\n    if sp.issparse(X):\n        if X.format in ['lil', 'dok']:\n            X = X.tocsr()\n        if X.data.size == 0:\n            X_min = 0\n        else:\n            X_min = X.data.min()\n    else:\n        X_min = xp.min(X)\n    if X_min < 0:\n        raise ValueError(f'Negative values in data passed to {whom}.')",
    "docstring": "Check if there is any negative value in an array. Parameters ---------- X : {array-like, sparse matrix} Input data. whom : str Who passed X to this function.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\utils\\validation.py",
    "ast_data": "FunctionDef name:check_non_negative arg:X arg:whom arguments arg arg Assign Call If Call If Compare Assign Call If Compare Assign Assign Call Assign Call If Compare Raise Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_inputs_match",
    "source_code": "def _make_inputs_match(branch_graphs, branch_inputs):\n    assert len(branch_graphs) == len(branch_inputs)\n    added_inputs = set()\n    new_inputs = []\n    for branch_in in branch_inputs:\n        for tensor in branch_in:\n            tensor_id = ops.tensor_id(tensor)\n            if tensor_id not in added_inputs:\n                added_inputs.add(tensor_id)\n                new_inputs.append(tensor)\n    for branch_graph, branch_in in zip(branch_graphs, branch_inputs):\n        input_ids = [ops.tensor_id(t) for t in branch_in]\n        branch_input_to_param = dict(zip(input_ids, branch_graph.inputs))\n        input_list = []\n        for in_t in new_inputs:\n            param = branch_input_to_param.get(ops.tensor_id(in_t))\n            if param is None:\n                param = _create_dummy_input(branch_graph, in_t)\n            input_list.append(param)\n        branch_graph.inputs = input_list\n        branch_graph.function_captures.reset_captures(new_inputs, branch_graph.inputs)\n    return new_inputs",
    "docstring": "Modifies branch_graphs so they have the same input signature. This method reorders and/or adds parameters to each graph in branch_graphs so they have the same input signature, and updates the 'inputs' and 'captured' fields of each graph accordingly. It uses the input tensors from the outer graph to avoid duplicating shared arguments. Args: branch_graphs: a of branch_inputs: a of s of s in the outer graph. The inputs for the corresponding graph in . Returns: A new list of Tensors from the outer graph that are the new inputs for each branch_graph. This is a deduped version of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py",
    "ast_data": "FunctionDef name:_make_inputs_match arg:branch_graphs arg:branch_inputs arguments arg arg Compare Call Call Assign Call Assign For For Assign Call If Compare Call Call For Call Assign Call Assign Call Call Assign For Assign Call Call If Compare Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "ymax",
    "source_code": "@property\ndef ymax(self) -> torch.Tensor:\n    return self._data[..., 3]",
    "docstring": "The bounding box bottom-right y-coordinate.",
    "type": "method",
    "file_path": "kornia\\kornia\\contrib\\face_detection.py",
    "ast_data": "FunctionDef name:ymax arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_flag_value_to_re_list",
    "source_code": "def _flag_value_to_re_list(self, flag_name):\n    re_list = []\n    found, flag_value = self.get_flag_value(flag_name)\n    if not found or not flag_value:\n        return re_list\n    list_of_values = flag_value.split(',')\n    for v in list_of_values:\n        r = re.compile(v)\n        re_list.append(r)\n    return re_list",
    "docstring": "Converts list of strings to compiled RE.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_flag_value_to_re_list arg:self arg:flag_name arguments arg arg Assign Assign Call If BoolOp Return return:yes Assign Call For Assign Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "_destroy_test_db",
    "source_code": "def _destroy_test_db(self, test_database_name, verbosity):\n    with self._nodb_cursor() as cursor:\n        cursor.execute('DROP DATABASE %s' % self.connection.ops.quote_name(test_database_name))",
    "docstring": "Internal implementation - remove the test db tables.",
    "type": "method",
    "file_path": "django\\django\\db\\backends\\base\\creation.py",
    "ast_data": "FunctionDef name:_destroy_test_db arg:self arg:test_database_name arg:verbosity arguments arg arg arg With Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "_load_for_lite_interpreter",
    "source_code": "def _load_for_lite_interpreter(f, map_location=None):\n    if isinstance(f, (str, os.PathLike)):\n        if not os.path.exists(f):\n            raise ValueError(f'The provided filename {f} does not exist')\n        if os.path.isdir(f):\n            raise ValueError(f'The provided filename {f} is a directory')\n    map_location = validate_map_location(map_location)\n    if isinstance(f, (str, os.PathLike)):\n        cpp_module = torch._C._load_for_lite_interpreter(os.fspath(f), map_location)\n    else:\n        cpp_module = torch._C._load_for_lite_interpreter_from_buffer(f.read(), map_location)\n    return LiteScriptModule(cpp_module)",
    "docstring": "Load a :class: saved with :func:. Args: f: a file-like object (has to implement read, readline, tell, and seek), or a string containing a file name map_location: a string or torch.device used to dynamically remap storages to an alternative set of devices. Returns: A :class: object. Example: .. testcode:: import torch import io # Load LiteScriptModule from saved file path torch.jit._load_for_lite_interpreter('lite_script_module.pt') # Load LiteScriptModule from io.BytesIO object with open('lite_script_module.pt', 'rb') as f: buffer = io.BytesIO(f.read()) # Load all tensors to the original device torch.jit.mobile._load_for_lite_interpreter(buffer)",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\mobile\\__init__.py",
    "ast_data": "FunctionDef name:_load_for_lite_interpreter arg:f arg:map_location arguments arg arg If Call If Call Raise Call If Call Raise Call Assign Call If Call Assign Call Call Assign Call Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "SparseEfficiencyWarning",
    "source_code": "class SparseEfficiencyWarning(SparseWarning):\n    pass",
    "docstring": "The warning emitted when the operation is inefficient for sparse matrices.",
    "type": "class",
    "file_path": "scipy\\scipy\\sparse\\_base.py",
    "ast_data": "ClassDef name:SparseEfficiencyWarning"
  },
  {
    "library": "pytorch",
    "name": "_normalize_kwargs",
    "source_code": "def _normalize_kwargs(func: Callable, loc: dict[str, Any]) -> 'OrderedDict[str, Any]':\n    default_kwargs = _get_default_kwargs(func)\n    local_kwargs = _get_signature_locals(func, loc)\n    normalized_kwargs = default_kwargs.copy()\n    for attr, val in local_kwargs.items():\n        if attr in normalized_kwargs:\n            normalized_kwargs[attr] = val\n    return normalized_kwargs",
    "docstring": "Given a function and local function arguments, normalize the keyword arguments by filling in default arguments from function signature Example:: >> def f(self, key1=3, key2=3): pass >> loc = {\"key2\": 6} >> _normalize_kwargs(f, loc) {\"key1\": 3, \"key2\": 6}",
    "type": "function",
    "file_path": "pytorch\\torch\\ao\\quantization\\utils.py",
    "ast_data": "FunctionDef name:_normalize_kwargs arg:func arg:loc arguments arg arg Assign Call Assign Call Assign Call For Call If Compare Assign Return return:yes"
  },
  {
    "library": "django",
    "name": "DisallowedModelAdminToField",
    "source_code": "class DisallowedModelAdminToField(SuspiciousOperation):\n    pass",
    "docstring": "Invalid to_field was passed to admin view via URL query string",
    "type": "class",
    "file_path": "django\\django\\contrib\\admin\\exceptions.py",
    "ast_data": "ClassDef name:DisallowedModelAdminToField"
  },
  {
    "library": "django",
    "name": "unquote",
    "source_code": "def unquote(s):\n    return UNQUOTE_RE.sub(lambda m: UNQUOTE_MAP[m[0]], s)",
    "docstring": "Undo the effects of quote().",
    "type": "function",
    "file_path": "django\\django\\contrib\\admin\\utils.py",
    "ast_data": "FunctionDef name:unquote arg:s arguments arg Return return:yes Call arguments arg"
  },
  {
    "library": "tensorflow",
    "name": "_shape_and_dtype_str",
    "source_code": "def _shape_and_dtype_str(tensor):\n    return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)",
    "docstring": "Returns a string containing tensor's shape and dtype.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py",
    "ast_data": "FunctionDef name:_shape_and_dtype_str arg:tensor arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self):\n    vmin, vmax = self.axis.get_view_interval()\n    return self.tick_values(vmin, vmax)",
    "docstring": "Return the locations of the ticks.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\ticker.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "RejectionResampleBenchmark",
    "source_code": "class RejectionResampleBenchmark(benchmark_base.DatasetBenchmarkBase):\n\n    def benchmark_resample_performance(self):\n        init_dist = [0.25, 0.25, 0.25, 0.25]\n        target_dist = [0.0, 0.0, 0.0, 1.0]\n        num_classes = len(init_dist)\n        num_samples = 1000\n        data_np = np.random.choice(num_classes, num_samples, p=init_dist)\n        dataset = dataset_ops.Dataset.from_tensor_slices(data_np).repeat()\n        dataset = dataset.apply(resampling.rejection_resample(class_func=lambda x: x, target_dist=target_dist, initial_dist=init_dist, seed=142))\n        options = options_lib.Options()\n        options.experimental_optimization.apply_default_optimizations = False\n        dataset = dataset.with_options(options)\n        wall_time = self.run_benchmark(dataset=dataset, num_elements=num_samples, iters=10, warmup=True)\n        resample_time = wall_time * num_samples\n        self.report_benchmark(iters=10, wall_time=resample_time, extras={'model_name': 'rejection_resample.benchmark.1', 'parameters': '%d' % num_samples}, name='resample_{}'.format(num_samples))",
    "docstring": "Benchmarks for .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\benchmarks\\rejection_resample_benchmark.py",
    "ast_data": "ClassDef name:RejectionResampleBenchmark FunctionDef name:benchmark_resample_performance arg:self arguments arg Assign Assign Assign Call Assign Assign Call Assign Call Call Assign Call Call arguments arg Assign Call Assign Assign Call Assign Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "HelperFunctions",
    "source_code": "class HelperFunctions:\n    _templates_seen: dict[str, str]\n    finalized_helpers: list[str]\n\n    def __init__(self) -> None:\n        self._templates_seen = {}\n        self.finalized_helpers = []\n\n    def add(self, template_code: str, *, base_name='_triton_helper_fn') -> str:\n        existing_name = self._templates_seen.get(template_code)\n        if existing_name is not None:\n            return existing_name\n        name = f'{base_name}{len(self.finalized_helpers)}'\n        self._templates_seen[template_code] = name\n        self.finalized_helpers.append(template_code.format(name=name))\n        return name\n\n    def __iter__(self):\n        return iter(self.finalized_helpers)\n\n    def __getitem__(self, idx):\n        return self.finalized_helpers[idx]",
    "docstring": "An ordered set of helper functions.",
    "type": "class",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py",
    "ast_data": "ClassDef name:HelperFunctions FunctionDef name:__init__ arg:self arguments arg Assign Assign FunctionDef name:add arg:self arg:template_code arguments arg arg arg Assign Call If Compare Return return:yes Assign Call Assign Call Call Return return:yes FunctionDef name:__iter__ arg:self arguments arg Return return:yes Call FunctionDef name:__getitem__ arg:self arg:idx arguments arg arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_n_features_out",
    "source_code": "@property\ndef _n_features_out(self):\n    return self.components_.shape[0]",
    "docstring": "Number of transformed output features.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py",
    "ast_data": "FunctionDef name:_n_features_out arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_gapcolor",
    "source_code": "def get_gapcolor(self):\n    return self._gapcolor",
    "docstring": "Return the line gapcolor. See also .",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\lines.py",
    "ast_data": "FunctionDef name:get_gapcolor arg:self arguments arg Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, X):\n    if self.solver == 'lsqr':\n        raise NotImplementedError(\"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen').\")\n    check_is_fitted(self)\n    xp, _ = get_namespace(X)\n    X = validate_data(self, X, reset=False)\n    if self.solver == 'svd':\n        X_new = (X - self.xbar_) @ self.scalings_\n    elif self.solver == 'eigen':\n        X_new = X @ self.scalings_\n    return X_new[:, :self._max_components]",
    "docstring": "Project data to maximize class separation. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- X_new : ndarray of shape (n_samples, n_components) or (n_samples, min(rank, n_components)) Transformed data. In the case of the 'svd' solver, the shape is (n_samples, min(rank, n_components)).",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg If Compare Raise Call Call Assign Call Assign Call If Compare Assign If Compare Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "transform_boxes_",
    "source_code": "def transform_boxes_(self, M: torch.Tensor) -> Boxes:\n    return self.transform_boxes(M, inplace=True)",
    "docstring": "Inplace version of :func:.",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\boxes.py",
    "ast_data": "FunctionDef name:transform_boxes_ arg:self arg:M arguments arg arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_new_args_filter",
    "source_code": "@classmethod\ndef _new_args_filter(cls, arg_sequence):\n    for arg in arg_sequence:\n        if not isinstance(arg, Expr) or arg.is_extended_real is False or (arg.is_number and (not arg.is_comparable)):\n            raise ValueError(f\"The argument '{arg}' is not comparable.\")\n        if arg == cls.zero:\n            raise ShortCircuit(arg)\n        elif arg == cls.identity:\n            continue\n        elif arg.func == cls:\n            yield from arg.args\n        else:\n            yield arg",
    "docstring": "Generator filtering args. first standard filter, for cls.zero and cls.identity. Also reshape ``, and check arguments for comparability",
    "type": "method",
    "file_path": "pytorch\\torch\\utils\\_sympy\\functions.py",
    "ast_data": "FunctionDef name:_new_args_filter arg:cls arg:arg_sequence arguments arg arg For If BoolOp Call Compare BoolOp Raise Call If Compare Raise Call If Compare If Compare"
  },
  {
    "library": "pytorch",
    "name": "_guard_sizes_oblivious",
    "source_code": "def _guard_sizes_oblivious(lhs_sizes: Sequence[Union[torch.SymInt, bool]], rhs_sizes: Sequence[Union[torch.SymInt, bool]]) -> bool:\n    return len(lhs_sizes) == len(rhs_sizes) and all((guard_size_oblivious(lhs_item == rhs_item) for lhs_item, rhs_item in zip(lhs_sizes, rhs_sizes)))",
    "docstring": "Leverage guard_size_oblivious to compare if two lists of int/symint are equal. Useful to compare sizes, strides etc.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py",
    "ast_data": "FunctionDef name:_guard_sizes_oblivious arg:lhs_sizes arg:rhs_sizes arguments arg arg Return return:yes BoolOp Compare Call Call Call Call Compare Call"
  },
  {
    "library": "pytorch",
    "name": "handle_import_error",
    "source_code": "def handle_import_error() -> NoReturn:\n    print('Error: PyTorch is not installed in the current Python environment.')\n    sys.exit(1)",
    "docstring": "Handle the case where PyTorch is not installed and exit the program. Exits: NoReturn: This function will terminate the program.",
    "type": "function",
    "file_path": "pytorch\\tools\\nightly_hotpatch.py",
    "ast_data": "FunctionDef name:handle_import_error arguments Call Call"
  },
  {
    "library": "pytorch",
    "name": "check_is_root",
    "source_code": "def check_is_root(self) -> bool:\n    return _is_fsdp_root(self, self)",
    "docstring": "Check if this instance is a root FSDP module.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py",
    "ast_data": "FunctionDef name:check_is_root arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "_check_unsampled_image",
    "source_code": "def _check_unsampled_image(self):\n    return self.get_interpolation() == 'none'",
    "docstring": "Return whether the image would be better drawn unsampled.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:_check_unsampled_image arg:self arguments arg Return return:yes Compare Call"
  },
  {
    "library": "matplotlib",
    "name": "_width_of",
    "source_code": "def _width_of(self, char):\n    metrics = self._tfm.get_metrics(char)\n    if metrics is None:\n        _log.debug('No width for char %d in font %s.', char, self.texname)\n        return 0\n    return _mul1220(metrics.tex_width, self._scale)",
    "docstring": "Width of char in dvi units.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\dviread.py",
    "ast_data": "FunctionDef name:_width_of arg:self arg:char arguments arg arg Assign Call If Compare Call Return return:yes Return return:yes Call"
  },
  {
    "library": "django",
    "name": "formfield_for_choice_field",
    "source_code": "def formfield_for_choice_field(self, db_field, request, **kwargs):\n    if db_field.name in self.radio_fields:\n        if 'widget' not in kwargs:\n            kwargs['widget'] = widgets.AdminRadioSelect(attrs={'class': get_ul_class(self.radio_fields[db_field.name])})\n        if 'choices' not in kwargs:\n            kwargs['choices'] = db_field.get_choices(include_blank=db_field.blank, blank_choice=[('', _('None'))])\n    return db_field.formfield(**kwargs)",
    "docstring": "Get a form Field for a database Field that has declared choices.",
    "type": "method",
    "file_path": "django\\django\\contrib\\admin\\options.py",
    "ast_data": "FunctionDef name:formfield_for_choice_field arg:self arg:db_field arg:request arguments arg arg arg arg If Compare If Compare Assign Call Call If Compare Assign Call Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "pre_save",
    "source_code": "def pre_save(self):\n    check_metadata_cacheable(self.runtime_metadata)\n    self.compiled_fw.pre_save()\n    if self.compiled_bw is not None:\n        self.compiled_bw.pre_save()",
    "docstring": "Perform any preparations to make the cache entry ready for serialization.",
    "type": "method",
    "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py",
    "ast_data": "FunctionDef name:pre_save arg:self arguments arg Call Call If Compare Call"
  },
  {
    "library": "kornia",
    "name": "dx_distort_points_affine",
    "source_code": "def dx_distort_points_affine(projected_points_in_camera_z1_plane: Tensor, params: Tensor) -> Tensor:\n    KORNIA_CHECK_SHAPE(projected_points_in_camera_z1_plane, ['*', '2'])\n    KORNIA_CHECK_SHAPE(params, ['*', '4'])\n    fx, fy = (params[..., 0], params[..., 1])\n    zeros = ops.zeros_like(fx)\n    return ops.stack([ops.stack([fx, zeros], dim=-1), ops.stack([zeros, fy], dim=-1)], dim=-2)",
    "docstring": "Compute the derivative of the x distortion with respect to the x coordinate. .. math:: \\frac{\\partial u}{\\partial x} = \\begin{bmatrix} f_x & 0 \\\\ 0 & f_y \\end{bmatrix} Args: projected_points_in_camera_z1_plane: Tensor representing the points to distort with shape (..., 2). params: Tensor representing the parameters of the affine distortion model with shape (..., 4). Returns: Tensor representing the derivative of the x distortion with respect to the x coordinate with shape (..., 2). Example: >>> points = torch.tensor([319.5, 239.5]) # center of a 640x480 image >>> params = torch.tensor([600., 600., 319.5, 239.5]) >>> dx_distort_points_affine(points, params) tensor([[600., 0.], [ 0., 600.]])",
    "type": "function",
    "file_path": "kornia\\kornia\\geometry\\camera\\distortion_affine.py",
    "ast_data": "FunctionDef name:dx_distort_points_affine arg:projected_points_in_camera_z1_plane arg:params arguments arg arg Call Call Assign Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "conv1d",
    "source_code": "@dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs\ndef conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):\n    if data_format is None:\n        data_format = image_data_format()\n    if data_format not in {'channels_first', 'channels_last'}:\n        raise ValueError('Unknown data_format: ' + str(data_format))\n    kernel_shape = kernel.shape.as_list()\n    if padding == 'causal':\n        left_pad = dilation_rate * (kernel_shape[0] - 1)\n        x = temporal_padding(x, (left_pad, 0))\n        padding = 'valid'\n    padding = _preprocess_padding(padding)\n    x, tf_data_format = _preprocess_conv1d_input(x, data_format)\n    x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)\n    if data_format == 'channels_first' and tf_data_format == 'NWC':\n        x = array_ops.transpose(x, (0, 2, 1))\n    return x",
    "docstring": "1D convolution. Args: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, , or . data_format: string, one of \"channels_last\", \"channels_first\". dilation_rate: integer dilate rate. Returns: A tensor, result of 1D convolution. Raises: ValueError: if is neither or .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py",
    "ast_data": "FunctionDef name:conv1d arg:x arg:kernel arg:strides arg:padding arg:data_format arg:dilation_rate arguments arg arg arg arg arg arg If Compare Assign Call If Compare Raise Call Call Assign Call If Compare Assign Assign Call Assign Assign Call Assign Call Assign Call If BoolOp Compare Compare Assign Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "DexiNedBuilder",
    "source_code": "class DexiNedBuilder:\n\n    @staticmethod\n    def build(model_name: str='dexined', pretrained: bool=True, image_size: int=352) -> EdgeDetector:\n        if model_name.lower() == 'dexined':\n            norm = Normalize(mean=tensor([[0.485, 0.456, 0.406]]), std=tensor([[1.0 / 255.0] * 3]))\n            model = nn.Sequential(norm, DexiNed(pretrained=pretrained), nn.Sigmoid())\n        else:\n            raise ValueError(f\"Model {model_name} not found. Please choose from 'DexiNed'.\")\n        return EdgeDetector(model, ResizePreProcessor(image_size, image_size), ResizePostProcessor(), name='dexined')",
    "docstring": "DexiNedBuilder is a class that builds a DexiNed model. .. code-block:: python images = kornia.utils.sample.get_sample_images() model = DexiNedBuilder.build() model.save(images)",
    "type": "class",
    "file_path": "kornia\\kornia\\models\\edge_detection\\dexined.py",
    "ast_data": "ClassDef name:DexiNedBuilder FunctionDef name:build arg:model_name arg:pretrained arg:image_size arguments arg arg arg If Compare Call Assign Call Call Call Assign Call Call Call Raise Call Return return:yes Call Call Call"
  },
  {
    "library": "scipy",
    "name": "id_to_svd",
    "source_code": "def id_to_svd(B, idx, proj):\n    B = _C_contiguous_copy(B)\n    if _is_real(B):\n        U, S, V = _backend.idd_id2svd(B, idx, proj)\n    else:\n        U, S, V = _backend.idz_id2svd(B, idx, proj)\n    return (U, S, V)",
    "docstring": "Convert ID to SVD. The SVD reconstruction of a matrix with skeleton matrix and ID indices and coefficients and , respectively, is:: U, S, V = id_to_svd(B, idx, proj) A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) See also :func:. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func: and :func:. Parameters ---------- B : :class: Skeleton matrix. idx : :class: 1D column index array. proj : :class: Interpolation coefficients. Returns ------- U : :class: Left singular vectors. S : :class: Singular values. V : :class: Right singular vectors.",
    "type": "function",
    "file_path": "scipy\\scipy\\linalg\\interpolative.py",
    "ast_data": "FunctionDef name:id_to_svd arg:B arg:idx arg:proj arguments arg arg arg Assign Call If Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_pad_util",
    "source_code": "def _pad_util(input_tensor, full_axis_dim):\n    missing_axis_dim = full_axis_dim - array_ops.shape_v2(input_tensor)[0]\n    tensor_rank = array_ops.rank(input_tensor)\n    paddings_axis = [[0, missing_axis_dim]]\n    paddings = array_ops.concat([paddings_axis, array_ops.zeros(shape=(tensor_rank - 1, 2), dtype=dtypes.int32)], axis=0)\n    padded_input_tensor = array_ops.pad(input_tensor, paddings)\n    return padded_input_tensor",
    "docstring": "Pad the 's first dimension to be .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py",
    "ast_data": "FunctionDef name:_pad_util arg:input_tensor arg:full_axis_dim arguments arg arg Assign Call Assign Call Assign Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "seaborn",
    "name": "_parse_local_version",
    "source_code": "def _parse_local_version(local: str) -> Optional[LocalType]:\n    if local is not None:\n        return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local)))\n    return None",
    "docstring": "Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").",
    "type": "function",
    "file_path": "seaborn\\seaborn\\external\\version.py",
    "ast_data": "FunctionDef name:_parse_local_version arg:local arguments arg If Compare Return return:yes Call Call Call Call Call Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_get_summary_signatures",
    "source_code": "def _get_summary_signatures(self):\n    signatures = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES)\n    supported_signatures = self._supported_signatures()\n    tt_signatures = []\n    for signature in signatures:\n        signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature)\n        if signature in supported_signatures:\n            tt_signatures.append(signature)\n        elif signature_with_prefix in supported_signatures:\n            tt_signatures.append(signature_with_prefix)\n        else:\n            logging.warning('Unknown signature:%s. Supported signatures: %s' % (signature, supported_signatures))\n    if not tt_signatures:\n        return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1}\n    else:\n        return {signature: idx for idx, signature in enumerate(tt_signatures)}",
    "docstring": "Verifies and returns the summary signatures. Returns: A dictionary of the signature identifiers {signature: index} that will be computed when trace_mode is summary.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py",
    "ast_data": "FunctionDef name:_get_summary_signatures arg:self arguments arg Assign Call Assign Call Assign For Assign If Compare Call If Compare Call Call If Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_record_tape",
    "source_code": "def _record_tape(self, capture):\n    record.record_operation('captured_value', [self], [capture], backward_function=lambda x: [x], forward_function=lambda x: [x])",
    "docstring": "Connect this graph tensor with capture for gradients calculation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py",
    "ast_data": "FunctionDef name:_record_tape arg:self arg:capture arguments arg arg Call arguments arg arguments arg"
  },
  {
    "library": "sphinx",
    "name": "copy_asset_file",
    "source_code": "def copy_asset_file(source: str | os.PathLike[str], destination: str | os.PathLike[str], context: dict[str, Any] | None=None, renderer: BaseRenderer | None=None, *, force: bool=False) -> None:\n    source = Path(source)\n    if not source.exists():\n        return\n    destination = Path(destination)\n    if destination.is_dir():\n        destination /= source.name\n    if _template_basename(source) and context is not None:\n        if renderer is None:\n            from sphinx.util.template import SphinxRenderer\n            renderer = SphinxRenderer()\n        template_content = source.read_text(encoding='utf-8')\n        rendered_template = renderer.render_string(template_content, context)\n        if not force and destination.exists() and (template_content != rendered_template):\n            msg = __('Aborted attempted copy from rendered template %s to %s (the destination path has existing data).')\n            logger.warning(msg, os.fsdecode(source), os.fsdecode(destination), type='misc', subtype='copy_overwrite')\n            return\n        destination = _template_basename(destination) or destination\n        msg = __('Writing evaluated template result to %s')\n        logger.info(msg, os.fsdecode(destination), type='misc', subtype='template_evaluation')\n        destination.write_text(rendered_template, encoding='utf-8')\n    else:\n        copyfile(source, destination, force=force)",
    "docstring": "Copy an asset file to destination. On copying, it expands the template variables if context argument is given and the asset is a template file. :param source: The path to source file :param destination: The path to destination file or directory :param context: The template variables. If not given, template files are simply copied :param renderer: The template engine. If not given, SphinxRenderer is used by default :param bool force: Overwrite the destination file even if it exists.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\util\\fileutil.py",
    "ast_data": "FunctionDef name:copy_asset_file arg:source arg:destination arg:context arg:renderer arguments arg arg arg arg arg Assign Call If Call Return return:no Assign Call If Call If BoolOp Call Compare If Compare Assign Call Assign Call Assign Call If BoolOp Call Compare Assign Call Call Call Call Return return:no Assign BoolOp Call Assign Call Call Call Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "sample",
    "source_code": "def sample(self, n_samples=1, random_state=None):\n    check_is_fitted(self)\n    if self.kernel not in ['gaussian', 'tophat']:\n        raise NotImplementedError()\n    data = np.asarray(self.tree_.data)\n    rng = check_random_state(random_state)\n    u = rng.uniform(0, 1, size=n_samples)\n    if self.tree_.sample_weight is None:\n        i = (u * data.shape[0]).astype(np.int64)\n    else:\n        cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))\n        sum_weight = cumsum_weight[-1]\n        i = np.searchsorted(cumsum_weight, u * sum_weight)\n    if self.kernel == 'gaussian':\n        return np.atleast_2d(rng.normal(data[i], self.bandwidth_))\n    elif self.kernel == 'tophat':\n        dim = data.shape[1]\n        X = rng.normal(size=(n_samples, dim))\n        s_sq = row_norms(X, squared=True)\n        correction = gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim) * self.bandwidth_ / np.sqrt(s_sq)\n        return data[i] + X * correction[:, np.newaxis]",
    "docstring": "Generate random samples from the model. Currently, this is implemented only for gaussian and tophat kernels. Parameters ---------- n_samples : int, default=1 Number of samples to generate. random_state : int, RandomState instance or None, default=None Determines random number generation used to generate random samples. Pass an int for reproducible results across multiple function calls. See :term:. Returns ------- X : array-like of shape (n_samples, n_features) List of samples.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_kde.py",
    "ast_data": "FunctionDef name:sample arg:self arg:n_samples arg:random_state arguments arg arg arg Call If Compare Raise Call Assign Call Assign Call Assign Call If Compare Assign Call Assign Call Call Assign Assign Call If Compare Return return:yes Call Call If Compare Assign Assign Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_verify_and_return_same_core_count",
    "source_code": "@staticmethod\ndef _verify_and_return_same_core_count(device_dict):\n    num_cores_per_host_set = {len(core_ids) for core_ids in device_dict.values()}\n    if len(num_cores_per_host_set) != 1:\n        raise RuntimeError('TPU cores on each device is not the same. This should never happen. Devices: {}'.format(device_dict))\n    return num_cores_per_host_set.pop()",
    "docstring": "Verifies that every device in device_dict has the same # of cores.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py",
    "ast_data": "FunctionDef name:_verify_and_return_same_core_count arg:device_dict arguments arg Assign Call Call If Compare Call Raise Call Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "_range2cols",
    "source_code": "def _range2cols(areas: str) -> list[int]:\n    cols: list[int] = []\n    for rng in areas.split(','):\n        if ':' in rng:\n            rngs = rng.split(':')\n            cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))\n        else:\n            cols.append(_excel2num(rng))\n    return cols",
    "docstring": "Convert comma separated list of column names and ranges to indices. Parameters ---------- areas : str A string containing a sequence of column ranges (or areas). Returns ------- cols : list A list of 0-based column indices. Examples -------- >>> _range2cols(\"A:E\") [0, 1, 2, 3, 4] >>> _range2cols(\"A,C,Z:AB\") [0, 2, 25, 26, 27]",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\excel\\_util.py",
    "ast_data": "FunctionDef name:_range2cols arg:areas arguments arg For Call If Compare Assign Call Call Call Call Call Call Call Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=False)\ndef fit(self, X, y=None):\n    self._fit_transform(X)\n    return self",
    "docstring": "Compute the embedding vectors for data X. Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors} Sample data, shape = (n_samples, n_features), in the form of a numpy array, sparse matrix, precomputed tree, or NearestNeighbors object. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns a fitted instance of self.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\manifold\\_isomap.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arguments arg arg arg Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "TFVersionCombination",
    "source_code": "class TFVersionCombination(test_combinations.TestCombination):\n\n    def should_execute_combination(self, kwargs):\n        tf_api_version = kwargs.pop('tf_api_version', None)\n        if tf_api_version == 1 and tf2.enabled():\n            return (False, 'Skipping a TF1.x test when TF2 is enabled.')\n        elif tf_api_version == 2 and (not tf2.enabled()):\n            return (False, 'Skipping a TF2 test when TF2 is not enabled.')\n        return (True, None)\n\n    def parameter_modifiers(self):\n        return [test_combinations.OptionalParameter('tf_api_version')]",
    "docstring": "Control the execution of the test in TF1.x and TF2. If TF2 is enabled then a test with TF1 test is going to be skipped and vice versa. Test targets continuously run in TF2 thanks to the tensorflow.v2 TAP target. A test can be run in TF2 with bazel by passing --test_env=TF2_BEHAVIOR=1.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\combinations.py",
    "ast_data": "ClassDef name:TFVersionCombination FunctionDef name:should_execute_combination arg:self arg:kwargs arguments arg arg Assign Call If BoolOp Compare Call Return return:yes If BoolOp Compare Call Return return:yes Return return:yes FunctionDef name:parameter_modifiers arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "load_descr",
    "source_code": "def load_descr(descr_file_name, *, descr_module=DESCR_MODULE, encoding='utf-8'):\n    path = resources.files(descr_module) / descr_file_name\n    return path.read_text(encoding=encoding)",
    "docstring": "Load from with . Parameters ---------- descr_file_name : str, default=None Name of rst file to be loaded from . For example . See also :func:. If not None, also returns the corresponding description of the dataset. descr_module : str or module, default='sklearn.datasets.descr' Module where lives. See also :func:. The default is . encoding : str, default=\"utf-8\" Name of the encoding that will be decoded with. The default is 'utf-8'. .. versionadded:: 1.4 Returns ------- fdescr : str Content of .",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\datasets\\_base.py",
    "ast_data": "FunctionDef name:load_descr arg:descr_file_name arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "_callback",
    "source_code": "def _callback(self, transformation):\n    if self.callback is not None:\n        self.callback(transformation, self.n_iter_)\n    self.n_iter_ += 1",
    "docstring": "Called after each iteration of the optimizer. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The solution computed by the optimizer in this iteration.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neighbors\\_nca.py",
    "ast_data": "FunctionDef name:_callback arg:self arg:transformation arguments arg arg If Compare Call"
  },
  {
    "library": "numpy",
    "name": "doc_note",
    "source_code": "def doc_note(initialdoc, note):\n    if initialdoc is None:\n        return\n    if note is None:\n        return initialdoc\n    notesplit = re.split('\\\\n\\\\s*?Notes\\\\n\\\\s*?-----', inspect.cleandoc(initialdoc))\n    notedoc = f'\\n\\nNotes\\n-----\\n{inspect.cleandoc(note)}\\n'\n    return ''.join(notesplit[:1] + [notedoc] + notesplit[1:])",
    "docstring": "Adds a Notes section to an existing docstring.",
    "type": "function",
    "file_path": "numpy\\numpy\\ma\\core.py",
    "ast_data": "FunctionDef name:doc_note arg:initialdoc arg:note arguments arg arg If Compare Return return:no If Compare Return return:yes Assign Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_per_replica_to_tensor",
    "source_code": "def _per_replica_to_tensor(var, dtype=None, name=None, as_ref=False):\n    del name\n    if dtype is not None and (not dtype.is_compatible_with(var.dtype)):\n        raise ValueError('Incompatible type conversion requested to type {!r} for variable of type {!r}'.format(dtype.name, var.dtype.name))\n    if as_ref:\n        raise NotImplementedError(\"PerReplica doesn't support being used as a reference.\")\n    if distribute_lib.in_cross_replica_context() or not distribute_lib.has_strategy():\n        raise ValueError('It looks like you are using a PerReplica object while not inside a replica context, which is not supported. Try running your op or function inside a replica context by using `strategy.run`')\n    else:\n        replica_id = values_util.get_current_replica_id_as_int()\n        return var.values[replica_id]",
    "docstring": "Converts a to a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_per_replica_to_tensor arg:var arg:dtype arg:name arg:as_ref arguments arg arg arg arg If BoolOp Compare Call Raise Call Call If Raise Call If BoolOp Call Call Raise Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "convert_variables_to_constants_v2",
    "source_code": "def convert_variables_to_constants_v2(func, lower_control_flow=True, aggressive_inlining=False):\n    converter_data = _FunctionConverterDataInEager(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining)\n    output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n    return _construct_concrete_function(func, output_graph_def, converted_input_indices)",
    "docstring": "Replaces all the variables in a graph with constants of the same values. TensorFlow 2.0 function for converting all Variable ops into Const ops holding the same values. This makes it possible to describe the network fully with a single GraphDef file, and allows the removal of a lot of ops related to loading and saving the variables. This function runs Grappler's function inlining optimization in order to return a single subgraph. The current implementation only works for graphs that do not contain any control flow or embedding related ops. Args: func: ConcreteFunction. lower_control_flow: Boolean indicating whether or not to lower control flow ops such as If and While. (default True) aggressive_inlining: Boolean indicating whether or not to do aggressive function inlining (might be unsafe if function has stateful ops, not properly connected to control outputs). (default False) Returns: ConcreteFunction containing a simplified version of the original.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py",
    "ast_data": "FunctionDef name:convert_variables_to_constants_v2 arg:func arg:lower_control_flow arg:aggressive_inlining arguments arg arg arg Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "to_parquet",
    "source_code": "@doc(storage_options=_shared_docs['storage_options'])\ndef to_parquet(self, path: FilePath | WriteBuffer[bytes] | None=None, *, engine: Literal['auto', 'pyarrow', 'fastparquet']='auto', compression: str | None='snappy', index: bool | None=None, partition_cols: list[str] | None=None, storage_options: StorageOptions | None=None, **kwargs) -> bytes | None:\n    from pandas.io.parquet import to_parquet\n    return to_parquet(self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs)",
    "docstring": "Write a DataFrame to the binary parquet format. This function writes the dataframe as a _. You can choose different parquet backends, and have the option of compression. See :ref: for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing `pandas io fastparquet pyarrow Categorical.remove_unused_categories` on the DataFrame before saving. Examples -------- >>> df = pd.DataFrame(data={{\"col1\": [1, 2], \"col2\": [3, 4]}}) >>> df.to_parquet(\"df.parquet.gzip\", compression=\"gzip\") # doctest: +SKIP >>> pd.read_parquet(\"df.parquet.gzip\") # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read()",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\frame.py",
    "ast_data": "FunctionDef name:to_parquet arg:self arg:path arguments arg arg arg arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "ServiceOptions",
    "source_code": "@tf_export('data.experimental.ServiceOptions')\nclass ServiceOptions(options_lib.OptionsBase):\n    pinned = options_lib.create_option(name='pinned', ty=bool, docstring='If true, the tf.data service client allocates data to pinned memory, which facilitates more efficient copying from host memory to GPU memory downstream. For gRPC, compression must be disabled for this to take effect. For alternative data transfer protocols, this may or may not take effect, depending on the implementation.')\n\n    def _to_proto(self):\n        pb = dataset_options_pb2.ServiceOptions()\n        if self.pinned is not None:\n            pb.pinned = self.pinned\n        return pb\n\n    def _from_proto(self, pb):\n        if pb.WhichOneof('optional_pinned') is not None:\n            self.pinned = pb.pinned",
    "docstring": "Represents options for tf.data service. You can set the service options of a dataset through the property of ; the property is an instance of .",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\options.py",
    "ast_data": "ClassDef name:ServiceOptions Assign Call FunctionDef name:_to_proto arg:self arguments arg Assign Call If Compare Assign Return return:yes FunctionDef name:_from_proto arg:self arg:pb arguments arg arg If Compare Call Assign Call"
  },
  {
    "library": "scikit-learn",
    "name": "classes_",
    "source_code": "@property\ndef classes_(self):\n    return self.estimator_.classes_",
    "docstring": "Classes labels available when is a classifier. Returns ------- ndarray of shape (n_classes,)",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_selection\\_rfe.py",
    "ast_data": "FunctionDef name:classes_ arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "should_record_backprop",
    "source_code": "def should_record_backprop(tensors):\n    return pywrap_tfe.TFE_Py_TapeSetShouldRecordBackprop(tensors)",
    "docstring": "Returns true if any tape in the stack watches any of these tensors. Only takes GradientTapes into account, not forward accumulators. Args: tensors: Tensors to check, typically inputs to an operation. Returns: Boolean, whether any tape watches any of .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py",
    "ast_data": "FunctionDef name:should_record_backprop arg:tensors arguments arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "zeros_like_impl",
    "source_code": "@_tag_zeros_tensor\ndef zeros_like_impl(tensor, dtype, name, optimize=True, layout=None):\n    with ops.name_scope(name, 'zeros_like', [tensor]) as name:\n        return array_like_impl(zeros, gen_array_ops.zeros_like, tensor, dtype, name, optimize=optimize, layout=layout)",
    "docstring": "Internal implementation for the v1/v2 zeros_like API calls.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py",
    "ast_data": "FunctionDef name:zeros_like_impl arg:tensor arg:dtype arg:name arg:optimize arg:layout arguments arg arg arg arg arg With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "grid",
    "source_code": "@_docstring.interpd\ndef grid(self, visible=None, which='major', axis='both', **kwargs):\n    _api.check_in_list(['x', 'y', 'both'], axis=axis)\n    if axis in ['x', 'both']:\n        self.xaxis.grid(visible, which=which, **kwargs)\n    if axis in ['y', 'both']:\n        self.yaxis.grid(visible, which=which, **kwargs)",
    "docstring": "Configure the grid lines. Parameters ---------- visible : bool or None, optional Whether to show the grid lines. If any *kwargs* are supplied, it is assumed you want the grid on and *visible* will be set to True. If *visible* is *None* and there are no *kwargs*, this toggles the visibility of the lines. which : {'major', 'minor', 'both'}, optional The grid lines to apply the changes on. axis : {'both', 'x', 'y'}, optional The axis to apply the changes on. **kwargs : properties Define the line properties of the grid, e.g.:: grid(color='r', linestyle='-', linewidth=2) Valid keyword arguments are: %(Line2D:kwdoc)s Notes ----- The axis is drawn as a unit, so the effective zorder for drawing the grid is determined by the zorder of each axis, not by the zorder of the objects comprising the grid. Therefore, to set grid zorder, use or, for more control, call the method of each axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py",
    "ast_data": "FunctionDef name:grid arg:self arg:visible arg:which arg:axis arguments arg arg arg arg arg Call If Compare Call If Compare Call"
  },
  {
    "library": "pytorch",
    "name": "calculate_scaled_minmax",
    "source_code": "def calculate_scaled_minmax(self):\n    if self.equalization_scale.nelement() == 1 and self.equalization_scale == torch.tensor(1):\n        warnings.warn('Must call calculate_equalization_scale before calling calculate_scaled_minmax. ' + 'Will not scale the next quantization observer.')\n        return (None, None)\n    min_inputs, max_inputs = self.get_input_minmax()\n    equalization_scale_reshaped = reshape_scale(self.equalization_scale, 0, min_inputs)\n    min_input_scaled = torch.min(torch.mul(min_inputs, equalization_scale_reshaped))\n    max_input_scaled = torch.max(torch.mul(max_inputs, equalization_scale_reshaped))\n    return (min_input_scaled, max_input_scaled)",
    "docstring": "Returns the scaled min/max inputs",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py",
    "ast_data": "FunctionDef name:calculate_scaled_minmax arg:self arguments arg If BoolOp Compare Call Compare Call Call Return return:no Assign Call Assign Call Assign Call Call Assign Call Call Return return:yes"
  },
  {
    "library": "sphinx",
    "name": "type_to_xref",
    "source_code": "def type_to_xref(target: str, env: BuildEnvironment, *, suppress_prefix: bool=False) -> addnodes.pending_xref:\n    if env:\n        kwargs = {'py:module': env.ref_context.get('py:module'), 'py:class': env.ref_context.get('py:class')}\n    else:\n        kwargs = {}\n    reftype, target, title, refspecific = parse_reftarget(target, suppress_prefix)\n    if env.config.python_use_unqualified_type_names:\n        shortname = title.split('.')[-1]\n        contnodes: list[Node] = [pending_xref_condition('', shortname, condition='resolved'), pending_xref_condition('', title, condition='*')]\n    else:\n        contnodes = [nodes.Text(title)]\n    return pending_xref('', *contnodes, refdomain='py', reftype=reftype, reftarget=target, refspecific=refspecific, **kwargs)",
    "docstring": "Convert a type string to a cross reference node.",
    "type": "function",
    "file_path": "sphinx\\sphinx\\domains\\python\\_annotations.py",
    "ast_data": "FunctionDef name:type_to_xref arg:target arg:env arguments arg arg arg If Assign Call Call Assign Assign Call If Assign Call Call Call Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "is_symm_mem_enabled_for_group",
    "source_code": "def is_symm_mem_enabled_for_group(group_name: str) -> bool:\n    return _is_test_mode or group_name in _group_name_to_store",
    "docstring": "Check if symmetric memory is enabled for a process group. Args: group_name (str): the name of the process group.",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py",
    "ast_data": "FunctionDef name:is_symm_mem_enabled_for_group arg:group_name arguments arg Return return:yes BoolOp Compare"
  },
  {
    "library": "pytorch",
    "name": "elu",
    "source_code": "def elu(input: Tensor, alpha: float=1.0, inplace: bool=False) -> Tensor:\n    if has_torch_function_unary(input):\n        return handle_torch_function(elu, (input,), input, alpha=alpha, inplace=inplace)\n    if inplace:\n        result = torch._C._nn.elu_(input, alpha)\n    else:\n        result = torch._C._nn.elu(input, alpha)\n    return result",
    "docstring": "Apply the Exponential Linear Unit (ELU) function element-wise. See :class: for more details.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:elu arg:input arg:alpha arg:inplace arguments arg arg arg If Call Return return:yes Call If Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "addcdiv",
    "source_code": "@register_decomposition(aten.addcdiv)\n@out_wrapper()\n@elementwise_type_promotion_wrapper(type_promoting_args=('self', 'tensor1', 'tensor2'), type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT)\ndef addcdiv(self: TensorLikeType, tensor1: TensorLikeType, tensor2: TensorLikeType, *, value: NumberType=1) -> TensorLikeType:\n    if value is not None:\n        dtype = self.dtype\n        python_type = utils.dtype_to_type(dtype)\n        torch._check_value(utils.is_weakly_lesser_type(type(value), python_type), lambda: f'value argument of type {type(value)} cannot be safely cast to type {python_type}!')\n    return self + value * tensor1 / tensor2",
    "docstring": "Reference implementation of torch.addcdiv",
    "type": "function",
    "file_path": "pytorch\\torch\\_refs\\__init__.py",
    "ast_data": "FunctionDef name:addcdiv arg:self arg:tensor1 arg:tensor2 arguments arg arg arg arg If Compare Assign Assign Call Call Call Call arguments Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_obj_reference_counts",
    "source_code": "@property\ndef _obj_reference_counts(self):\n    self._maybe_create_attribute('_obj_reference_counts_dict', object_identity.ObjectIdentityDictionary())\n    return self._obj_reference_counts_dict",
    "docstring": "A dictionary counting the number of attributes referencing an object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py",
    "ast_data": "FunctionDef name:_obj_reference_counts arg:self arguments arg Call Call Return return:yes"
  },
  {
    "library": "django",
    "name": "tuple",
    "source_code": "@property\ndef tuple(self):\n    return tuple((self[i].tuple for i in range(len(self))))",
    "docstring": "Get the tuple for each ring in this Polygon.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py",
    "ast_data": "FunctionDef name:tuple arg:self arguments arg Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_get_attrs_values",
    "source_code": "def _get_attrs_values(obj):\n    attrs = getattr(obj.__class__, '__attrs_attrs__')\n    return [getattr(obj, a.name) for a in attrs]",
    "docstring": "Returns the list of values from an attrs instance.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\client\\session.py",
    "ast_data": "FunctionDef name:_get_attrs_values arg:obj arguments arg Assign Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "get_partition_to_latency_mapping",
    "source_code": "def get_partition_to_latency_mapping(partitions: list[Partition], node_to_latency_mapping: dict[Node, NodeLatency]) -> dict[Partition, PartitionLatency]:\n    partition_to_latency_mapping: dict[Partition, PartitionLatency] = {}\n    for partition in partitions:\n        partition_latency = get_latency_of_one_partition(partition, node_to_latency_mapping)\n        partition_to_latency_mapping[partition] = partition_latency\n    return partition_to_latency_mapping",
    "docstring": "Given all the partitions and node_to_latency_mapping dictionary, return a mapping dictionary of each partition to its overall latency",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py",
    "ast_data": "FunctionDef name:get_partition_to_latency_mapping arg:partitions arg:node_to_latency_mapping arguments arg arg For Assign Call Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_copy_some_through",
    "source_code": "def _maybe_copy_some_through():\n    new_output, new_state = call_cell()\n    nest.assert_same_structure(zero_output, new_output)\n    nest.assert_same_structure(state, new_state)\n    flat_new_state = nest.flatten(new_state)\n    flat_new_output = nest.flatten(new_output)\n    return cond.cond(time < min_sequence_length, lambda: flat_new_output + flat_new_state, lambda: _copy_some_through(flat_new_output, flat_new_state))",
    "docstring": "Run RNN step. Pass through either no or some past state.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\rnn.py",
    "ast_data": "FunctionDef name:_maybe_copy_some_through arguments Assign Call Call Call Assign Call Assign Call Return return:yes Call Compare arguments arguments Call"
  },
  {
    "library": "django",
    "name": "DeclarativeFieldsMetaclass",
    "source_code": "class DeclarativeFieldsMetaclass(MediaDefiningClass):\n\n    def __new__(mcs, name, bases, attrs):\n        attrs['declared_fields'] = {key: attrs.pop(key) for key, value in list(attrs.items()) if isinstance(value, Field)}\n        new_class = super().__new__(mcs, name, bases, attrs)\n        declared_fields = {}\n        for base in reversed(new_class.__mro__):\n            if hasattr(base, 'declared_fields'):\n                declared_fields.update(base.declared_fields)\n            for attr, value in base.__dict__.items():\n                if value is None and attr in declared_fields:\n                    declared_fields.pop(attr)\n        new_class.base_fields = declared_fields\n        new_class.declared_fields = declared_fields\n        return new_class",
    "docstring": "Collect Fields declared on the base classes.",
    "type": "class",
    "file_path": "django\\django\\forms\\forms.py",
    "ast_data": "ClassDef name:DeclarativeFieldsMetaclass FunctionDef name:__new__ arg:mcs arg:name arg:bases arg:attrs arguments arg arg arg arg Assign Call Call Call Call Assign Call Call Assign For Call If Call Call For Call If BoolOp Compare Compare Call Assign Assign Return return:yes"
  },
  {
    "library": "kornia",
    "name": "matrix",
    "source_code": "def matrix(self) -> Tensor:\n    w = self.q.w[..., None]\n    x, y, z = (self.q.x[..., None], self.q.y[..., None], self.q.z[..., None])\n    q0 = 1 - 2 * y ** 2 - 2 * z ** 2\n    q1 = 2 * x * y - 2 * z * w\n    q2 = 2 * x * z + 2 * y * w\n    row0 = concatenate((q0, q1, q2), -1)\n    q0 = 2 * x * y + 2 * z * w\n    q1 = 1 - 2 * x ** 2 - 2 * z ** 2\n    q2 = 2 * y * z - 2 * x * w\n    row1 = concatenate((q0, q1, q2), -1)\n    q0 = 2 * x * z - 2 * y * w\n    q1 = 2 * y * z + 2 * x * w\n    q2 = 1 - 2 * x ** 2 - 2 * y ** 2\n    row2 = concatenate((q0, q1, q2), -1)\n    return stack((row0, row1, row2), -2)",
    "docstring": "Convert the quaternion to a rotation matrix of shape :math:. The matrix is of the form: .. math:: \\begin{bmatrix} 1-2y^2-2z^2 & 2xy-2zw & 2xy+2yw \\\\ 2xy+2zw & 1-2x^2-2z^2 & 2yz-2xw \\\\ 2xz-2yw & 2yz+2xw & 1-2x^2-2y^2\\end{bmatrix} Example: >>> s = So3.identity() >>> m = s.matrix() >>> m tensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], grad_fn=)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py",
    "ast_data": "FunctionDef name:matrix arg:self arguments arg Assign Assign Assign Assign Assign Assign Call Assign Assign Assign Assign Call Assign Assign Assign Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "face",
    "source_code": "def face(gray=False):\n    import bz2\n    fname = fetch_data('face.dat')\n    with open(fname, 'rb') as f:\n        rawdata = f.read()\n    face_data = bz2.decompress(rawdata)\n    face = frombuffer(face_data, dtype='uint8')\n    face.shape = (768, 1024, 3)\n    if gray is True:\n        face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + 0.07 * face[:, :, 2]).astype('uint8')\n    return face",
    "docstring": "Get a 1024 x 768, color image of a raccoon face. The image is derived from Parameters ---------- gray : bool, optional If True return 8-bit grey-scale image, otherwise return a color image Returns ------- face : ndarray image of a raccoon face Examples -------- >>> import scipy.datasets >>> face = scipy.datasets.face() >>> face.shape (768, 1024, 3) >>> face.max() np.uint8(255) >>> import matplotlib.pyplot as plt >>> plt.gray() >>> plt.imshow(face) >>> plt.show()",
    "type": "function",
    "file_path": "scipy\\scipy\\datasets\\_fetchers.py",
    "ast_data": "FunctionDef name:face arg:gray arguments arg Assign Call With Call Assign Call Assign Call Assign Call Assign If Compare Assign Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "LinearTriInterpolator",
    "source_code": "class LinearTriInterpolator(TriInterpolator):\n\n    def __init__(self, triangulation, z, trifinder=None):\n        super().__init__(triangulation, z, trifinder)\n        self._plane_coefficients = self._triangulation.calculate_plane_coefficients(self._z)\n\n    def __call__(self, x, y):\n        return self._interpolate_multikeys(x, y, tri_index=None, return_keys=('z',))[0]\n    __call__.__doc__ = TriInterpolator._docstring__call__\n\n    def gradient(self, x, y):\n        return self._interpolate_multikeys(x, y, tri_index=None, return_keys=('dzdx', 'dzdy'))\n    gradient.__doc__ = TriInterpolator._docstringgradient\n\n    def _interpolate_single_key(self, return_key, tri_index, x, y):\n        _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)\n        if return_key == 'z':\n            return self._plane_coefficients[tri_index, 0] * x + self._plane_coefficients[tri_index, 1] * y + self._plane_coefficients[tri_index, 2]\n        elif return_key == 'dzdx':\n            return self._plane_coefficients[tri_index, 0]\n        else:\n            return self._plane_coefficients[tri_index, 1]",
    "docstring": "Linear interpolator on a triangular grid. Each triangle is represented by a plane so that an interpolated value at point (x, y) lies on the plane of the triangle containing (x, y). Interpolated values are therefore continuous across the triangulation, but their first derivatives are discontinuous at edges between triangles. Parameters ---------- triangulation : The triangulation to interpolate over. z : (npoints,) array-like Array of values, defined at grid points, to interpolate between. trifinder : , optional If this is not specified, the Triangulation's default TriFinder will be used by calling . Methods ------- (x, y) : Returns interpolated values at (x, y) points. (x, y) : Returns interpolated derivatives at (x, y) points.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py",
    "ast_data": "ClassDef name:LinearTriInterpolator FunctionDef name:__init__ arg:self arg:triangulation arg:z arg:trifinder arguments arg arg arg arg Call Call Assign Call FunctionDef name:__call__ arg:self arg:x arg:y arguments arg arg arg Return return:yes Call Assign FunctionDef name:gradient arg:self arg:x arg:y arguments arg arg arg Return return:yes Call Assign FunctionDef name:_interpolate_single_key arg:self arg:return_key arg:tri_index arg:x arg:y arguments arg arg arg arg arg Call If Compare Return return:yes If Compare Return return:yes Return return:yes"
  },
  {
    "library": "scikit-learn",
    "name": "_mean_interpolated_score",
    "source_code": "def _mean_interpolated_score(target_thresholds, cv_thresholds, cv_scores):\n    return np.mean([np.interp(target_thresholds, split_thresholds, split_score) for split_thresholds, split_score in zip(cv_thresholds, cv_scores)], axis=0)",
    "docstring": "Compute the mean interpolated score across folds by defining common thresholds. Parameters ---------- target_thresholds : ndarray of shape (thresholds,) The thresholds to use to compute the mean score. cv_thresholds : ndarray of shape (n_folds, thresholds_fold) The thresholds used to compute the scores for each fold. cv_scores : ndarray of shape (n_folds, thresholds_fold) The scores computed for each threshold for each fold. Returns ------- mean_score : ndarray of shape (thresholds,) The mean score across all folds for each target threshold.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_classification_threshold.py",
    "ast_data": "FunctionDef name:_mean_interpolated_score arg:target_thresholds arg:cv_thresholds arg:cv_scores arguments arg arg arg Return return:yes Call Call Call"
  },
  {
    "library": "seaborn",
    "name": "show",
    "source_code": "def show(self, **kwargs) -> None:\n    self.plot(pyplot=True).show(**kwargs)",
    "docstring": "Compile the plot and display it by hooking into pyplot. Calling this method is not necessary to render a plot in notebook context, but it may be in other environments (e.g., in a terminal). After compiling the plot, it calls :func: (passing any keyword parameters). Unlike other :class: methods, there is no return value. This should be the last method you call when specifying a plot.",
    "type": "method",
    "file_path": "seaborn\\seaborn\\_core\\plot.py",
    "ast_data": "FunctionDef name:show arg:self arguments arg arg Call Call"
  },
  {
    "library": "matplotlib",
    "name": "copy_properties",
    "source_code": "def copy_properties(self, gc):\n    self._alpha = gc._alpha\n    self._forced_alpha = gc._forced_alpha\n    self._antialiased = gc._antialiased\n    self._capstyle = gc._capstyle\n    self._cliprect = gc._cliprect\n    self._clippath = gc._clippath\n    self._dashes = gc._dashes\n    self._joinstyle = gc._joinstyle\n    self._linestyle = gc._linestyle\n    self._linewidth = gc._linewidth\n    self._rgb = gc._rgb\n    self._hatch = gc._hatch\n    self._hatch_color = gc._hatch_color\n    self._hatch_linewidth = gc._hatch_linewidth\n    self._url = gc._url\n    self._gid = gc._gid\n    self._snap = gc._snap\n    self._sketch = gc._sketch",
    "docstring": "Copy properties from *gc* to self.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:copy_properties arg:self arg:gc arguments arg arg Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign"
  },
  {
    "library": "scikit-learn",
    "name": "predict",
    "source_code": "def predict(self, X):\n    raw_prediction = self._linear_predictor(X)\n    y_pred = self._base_loss.link.inverse(raw_prediction)\n    return y_pred",
    "docstring": "Predict using GLM with feature matrix X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. Returns ------- y_pred : array of shape (n_samples,) Returns predicted values.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\glm.py",
    "ast_data": "FunctionDef name:predict arg:self arg:X arguments arg arg Assign Call Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "hard_sigmoid",
    "source_code": "@dispatch.add_dispatch_support\ndef hard_sigmoid(x):\n    return backend.hard_sigmoid(x)",
    "docstring": "Hard sigmoid activation function. A faster approximation of the sigmoid activation. Piecewise linear approximation of the sigmoid function. Ref: ' For example: >>> a = tf.constant([-3.0,-1.0, 0.0,1.0,3.0], dtype = tf.float32) >>> b = tf.keras.activations.hard_sigmoid(a) >>> b.numpy() array([0. , 0.3, 0.5, 0.7, 1. ], dtype=float32) Args: x: Input tensor. Returns: The hard sigmoid activation, defined as: - -",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py",
    "ast_data": "FunctionDef name:hard_sigmoid arg:x arguments arg Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "set_output",
    "source_code": "def set_output(self, *, transform=None):\n    super().set_output(transform=transform)\n    transformers = (trans for _, trans, _ in chain(self.transformers, getattr(self, 'transformers_', [])) if trans not in {'passthrough', 'drop'})\n    for trans in transformers:\n        _safe_set_output(trans, transform=transform)\n    if self.remainder not in {'passthrough', 'drop'}:\n        _safe_set_output(self.remainder, transform=transform)\n    return self",
    "docstring": "Set the output container when and are called. Calling will set the output of all estimators in and . Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . - : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged .. versionadded:: 1.4 option was added. Returns ------- self : estimator instance Estimator instance.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py",
    "ast_data": "FunctionDef name:set_output arg:self arguments arg arg Call Call Assign Call Call Compare For Call If Compare Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_filename_and_line_from_caller",
    "source_code": "def set_filename_and_line_from_caller(self, offset: int=0) -> int:\n    retcode = self.SUCCESS\n    frame = inspect.currentframe()\n    if not frame:\n        return self.FAILURE\n    frame = cast(types.FrameType, frame)\n    for _ in range(offset + 1):\n        parent = frame.f_back\n        if parent is None:\n            retcode = self.HEURISTIC_USED\n            break\n        parent = cast(types.FrameType, parent)\n        frame = parent\n    self.filename = frame.f_code.co_filename\n    self.lineno = cast(int, frame.f_lineno)\n    return retcode",
    "docstring": "Set filename and line using the caller's stack frame. If the requested stack information is not available, a heuristic may be applied and self.HEURISTIC USED will be returned. If the heuristic fails then no change will be made to the filename and lineno members (None by default) and self.FAILURE will be returned. Args: offset: Integer. If 0, the caller's stack frame is used. If 1, the caller's caller's stack frame is used. Larger values are permissible but if out-of-range (larger than the number of stack frames available) the outermost stack frame will be used. Returns: TraceableObject.SUCCESS if appropriate stack information was found, TraceableObject.HEURISTIC_USED if the offset was larger than the stack, and TraceableObject.FAILURE if the stack was empty.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\traceable_stack.py",
    "ast_data": "FunctionDef name:set_filename_and_line_from_caller arg:self arg:offset arguments arg arg Assign Assign Call If Return return:yes Assign Call For Call Assign If Compare Assign Assign Call Assign Assign Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_promote_helper",
    "source_code": "def _promote_helper(self, source_path, new_parent_path):\n    current_field = self.field_value(source_path)\n    new_parent_rank = self.field_value(new_parent_path).rank\n    parent_rank = self.field_value(source_path[:-1]).rank\n    if new_parent_rank == parent_rank:\n        return current_field\n    current_field_rank = current_field.shape.rank\n    if current_field_rank is None:\n        raise ValueError('Cannot determine if dimensions should be merged.')\n    inner_dim = min(parent_rank, current_field_rank - 1)\n    if inner_dim <= new_parent_rank:\n        return current_field\n    return _merge_dims_generic(current_field, new_parent_rank, inner_dim)",
    "docstring": "Creates a promoted field without adding it to the structure. Args: source_path: the source path in the structured tensor. new_parent_path: the new parent path. Must be a prefix of source_path. Returns: a composite tensor of source_path promoted. Raises: ValueError: if the shape of the field is unknown and the right strategy cannot be determined.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_tensor.py",
    "ast_data": "FunctionDef name:_promote_helper arg:self arg:source_path arg:new_parent_path arguments arg arg arg Assign Call Assign Call Assign Call If Compare Return return:yes Assign If Compare Raise Call Assign Call If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "_gather_unique_untuned_gemm_from_files",
    "source_code": "def _gather_unique_untuned_gemm_from_files(filename_pattern: str) -> set[str]:\n    unique_gemm_entries = set()\n    for file_path in glob.glob(filename_pattern):\n        with open(file_path) as file:\n            for line in file:\n                if line.startswith(('Gemm', 'ScaledGemm')):\n                    unique_gemm_entries.add(line)\n    return unique_gemm_entries",
    "docstring": "Process multiple untuned results file and return a set with duplicates removed.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\tunable.py",
    "ast_data": "FunctionDef name:_gather_unique_untuned_gemm_from_files arg:filename_pattern arguments arg Assign Call For Call With Call For If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "disconnect",
    "source_code": "def disconnect(self, cid):\n    self._observers.disconnect(cid)",
    "docstring": "Remove the callback function with connection id *cid*.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\widgets.py",
    "ast_data": "FunctionDef name:disconnect arg:self arg:cid arguments arg arg Call"
  },
  {
    "library": "pandas",
    "name": "_managle_lambda_list",
    "source_code": "def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:\n    if len(aggfuncs) <= 1:\n        return aggfuncs\n    i = 0\n    mangled_aggfuncs = []\n    for aggfunc in aggfuncs:\n        if com.get_callable_name(aggfunc) == '<lambda>':\n            aggfunc = partial(aggfunc)\n            aggfunc.__name__ = f'<lambda_{i}>'\n            i += 1\n        mangled_aggfuncs.append(aggfunc)\n    return mangled_aggfuncs",
    "docstring": "Possibly mangle a list of aggfuncs. Parameters ---------- aggfuncs : Sequence Returns ------- mangled: list-like A new AggSpec sequence, where lambdas have been converted to have unique names. Notes ----- If just one aggfunc is passed, the name will not be mangled.",
    "type": "function",
    "file_path": "pandas\\pandas\\core\\apply.py",
    "ast_data": "FunctionDef name:_managle_lambda_list arg:aggfuncs arguments arg If Compare Call Return return:yes Assign Assign For If Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "scrapy",
    "name": "MetadataContract",
    "source_code": "class MetadataContract(Contract):\n    name = 'meta'\n\n    def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:\n        args['meta'] = json.loads(' '.join(self.args))\n        return args",
    "docstring": "Contract to set metadata arguments for the request. The value should be JSON-encoded dictionary, e.g.: @meta {\"arg1\": \"some value\"}",
    "type": "class",
    "file_path": "scrapy\\scrapy\\contracts\\default.py",
    "ast_data": "ClassDef name:MetadataContract Assign FunctionDef name:adjust_request_args arg:self arg:args arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "functions_to_serialize",
    "source_code": "@property\ndef functions_to_serialize(self):\n    functions = {}\n    for key, v in self.functions.items():\n        if key in CommonEndpoints.all_functions:\n            functions[key] = v.wrapped_call if isinstance(v, save_impl.LayerCall) else v\n    return functions",
    "docstring": "Returns functions to attach to the root object during serialization.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py",
    "ast_data": "FunctionDef name:functions_to_serialize arg:self arguments arg Assign For Call If Compare Assign Call Return return:yes"
  },
  {
    "library": "numpy",
    "name": "_concrete_ndptr",
    "source_code": "class _concrete_ndptr(_ndptr):\n\n    def _check_retval_(self):\n        return self.contents\n\n    @property\n    def contents(self):\n        full_dtype = np.dtype((self._dtype_, self._shape_))\n        full_ctype = ctypes.c_char * full_dtype.itemsize\n        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents\n        return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)",
    "docstring": "Like _ndptr, but with and specified. Notably, this means the pointer has enough information to reconstruct the array, which is not generally true.",
    "type": "class",
    "file_path": "numpy\\numpy\\ctypeslib\\_ctypeslib.py",
    "ast_data": "ClassDef name:_concrete_ndptr FunctionDef name:_check_retval_ arg:self arguments arg Return return:yes FunctionDef name:contents arg:self arguments arg Assign Call Assign Assign Call Call Return return:yes Call Call"
  },
  {
    "library": "scipy",
    "name": "Paviani",
    "source_code": "class Paviani(Benchmark):\n\n    def __init__(self, dimensions=10):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = list(zip([2.001] * self.N, [9.999] * self.N))\n        self.global_optimum = [[9.350266 for _ in range(self.N)]]\n        self.fglob = -45.7784684040686\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        return sum(log(x - 2) ** 2.0 + log(10.0 - x) ** 2.0) - prod(x) ** 0.2",
    "docstring": "Paviani objective function. This class defines the Paviani [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Paviani}}(x) = \\sum_{i=1}^{10} \\left[\\log^{2}\\left(10 - x_i\\right) + \\log^{2}\\left(x_i -2\\right)\\right] - \\left(\\prod_{i=1}^{10} x_i^{10} \\right)^{0.2} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: think Gavana web/code definition is wrong because final product term shouldn't raise x to power 10.",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py",
    "ast_data": "ClassDef name:Paviani FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Call Call Assign Call Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Return return:yes Call Call Call Call"
  },
  {
    "library": "pytorch",
    "name": "soft_margin_loss",
    "source_code": "def soft_margin_loss(input: Tensor, target: Tensor, size_average: Optional[bool]=None, reduce: Optional[bool]=None, reduction: str='mean') -> Tensor:\n    if has_torch_function_variadic(input, target):\n        return handle_torch_function(soft_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction)\n    if size_average is not None or reduce is not None:\n        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)\n    else:\n        reduction_enum = _Reduction.get_enum(reduction)\n    return torch._C._nn.soft_margin_loss(input, target, reduction_enum)",
    "docstring": "Compute the soft margin loss. See :class: for details. Args: input (Tensor): Predicted values. target (Tensor): Ground truth values. size_average (bool, optional): Deprecated (see :attr:). reduce (bool, optional): Deprecated (see :attr:). reduction (str, optional): Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'. 'mean': the mean of the output is taken. 'sum': the output will be summed. 'none': no reduction will be applied. Default: 'mean'. Returns: Tensor: Soft margin loss.",
    "type": "function",
    "file_path": "pytorch\\torch\\nn\\functional.py",
    "ast_data": "FunctionDef name:soft_margin_loss arg:input arg:target arg:size_average arg:reduce arg:reduction arguments arg arg arg arg arg If Call Return return:yes Call If BoolOp Compare Compare Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "authlib",
    "name": "as_json",
    "source_code": "def as_json(self, is_private=False, **params):\n    obj = self.as_dict(is_private, **params)\n    return json_dumps(obj)",
    "docstring": "Represent this key set as a JSON string.",
    "type": "method",
    "file_path": "authlib\\authlib\\jose\\rfc7517\\key_set.py",
    "ast_data": "FunctionDef name:as_json arg:self arg:is_private arguments arg arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "django",
    "name": "__setitem__",
    "source_code": "def __setitem__(self, index, value):\n    if isinstance(value, (list, tuple)):\n        pass\n    elif numpy and isinstance(value, numpy.ndarray):\n        pass\n    else:\n        raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')\n    if self.dims == 3 and self._z:\n        n_args = 3\n        point_setter = self._set_point_3d\n    else:\n        n_args = 2\n        point_setter = self._set_point_2d\n    if len(value) != n_args:\n        raise TypeError('Dimension of value does not match.')\n    self._checkindex(index)\n    point_setter(index, value)",
    "docstring": "Set the coordinate sequence value at the given index.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:__setitem__ arg:self arg:index arg:value arguments arg arg arg If Call If BoolOp Call Raise Call If BoolOp Compare Assign Assign Assign Assign If Compare Call Raise Call Call Call"
  },
  {
    "library": "cryptography",
    "name": "encode_public",
    "source_code": "def encode_public(self, public_key: rsa.RSAPublicKey, f_pub: _FragList) -> None:\n    pubn = public_key.public_numbers()\n    f_pub.put_mpint(pubn.e)\n    f_pub.put_mpint(pubn.n)",
    "docstring": "Write RSA public key",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:encode_public arg:self arg:public_key arg:f_pub arguments arg arg arg Assign Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_Adder",
    "source_code": "class _Adder(metaclass=abc.ABCMeta):\n\n    @property\n    def name(self):\n        return self.__class__.__name__\n\n    @abc.abstractmethod\n    def can_add(self, op1, op2):\n        pass\n\n    @abc.abstractmethod\n    def _add(self, op1, op2, operator_name, hints):\n        pass\n\n    def add(self, op1, op2, operator_name, hints=None):\n        updated_hints = _infer_hints_allowing_override(op1, op2, hints)\n        if operator_name is None:\n            operator_name = 'Add/' + op1.name + '__' + op2.name + '/'\n        scope_name = self.name\n        if scope_name.startswith('_'):\n            scope_name = scope_name[1:]\n        with ops.name_scope(scope_name):\n            return self._add(op1, op2, operator_name, updated_hints)",
    "docstring": "Abstract base class to add two operators. Each acts independently, adding everything it can, paying no attention as to whether another could have done the addition more efficiently.",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_addition.py",
    "ast_data": "ClassDef name:_Adder FunctionDef name:name arg:self arguments arg Return return:yes FunctionDef name:can_add arg:self arg:op1 arg:op2 arguments arg arg arg FunctionDef name:_add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg FunctionDef name:add arg:self arg:op1 arg:op2 arg:operator_name arg:hints arguments arg arg arg arg arg Assign Call If Compare Assign Assign If Call Assign With Call Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "get_font_preamble",
    "source_code": "@classmethod\ndef get_font_preamble(cls):\n    font_preamble, command = cls._get_font_preamble_and_command()\n    return font_preamble",
    "docstring": "Return a string containing font configuration for the tex preamble.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py",
    "ast_data": "FunctionDef name:get_font_preamble arg:cls arguments arg Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "get_public",
    "source_code": "def get_public(self, data: memoryview) -> tuple[tuple, memoryview]:\n    p, data = _get_mpint(data)\n    q, data = _get_mpint(data)\n    g, data = _get_mpint(data)\n    y, data = _get_mpint(data)\n    return ((p, q, g, y), data)",
    "docstring": "DSA public fields",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\serialization\\ssh.py",
    "ast_data": "FunctionDef name:get_public arg:self arg:data arguments arg arg Assign Call Assign Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "django",
    "name": "iterator",
    "source_code": "def iterator(self, chunk_size=None):\n    if chunk_size is None:\n        if self._prefetch_related_lookups:\n            raise ValueError('chunk_size must be provided when using QuerySet.iterator() after prefetch_related().')\n    elif chunk_size <= 0:\n        raise ValueError('Chunk size must be strictly positive.')\n    use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')\n    return self._iterator(use_chunked_fetch, chunk_size)",
    "docstring": "An iterator over the results from applying this QuerySet to the database. chunk_size must be provided for QuerySets that prefetch related objects. Otherwise, a default chunk_size of 2000 is supplied.",
    "type": "method",
    "file_path": "django\\django\\db\\models\\query.py",
    "ast_data": "FunctionDef name:iterator arg:self arg:chunk_size arguments arg arg If Compare If Raise Call If Compare Raise Call Assign Call Return return:yes Call"
  },
  {
    "library": "cherrypy",
    "name": "stop",
    "source_code": "def stop(self):\n    self.ready = False",
    "docstring": "Stop the HTTP server.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\process\\servers.py",
    "ast_data": "FunctionDef name:stop arg:self arguments arg Assign"
  },
  {
    "library": "tensorflow",
    "name": "eval",
    "source_code": "def eval(self, session=None):\n    return self._variable.eval(session=session)",
    "docstring": "In a session, computes and returns the value of this variable. This is not a graph construction method, it does not add ops to the graph. This convenience method requires a session where the graph containing this variable has been launched. If no session is passed, the default session is used. See for more information on launching a graph and on sessions. Args: session: The session to use to evaluate this variable. If none, the default session is used. Returns: A numpy with a copy of the value of this variable.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py",
    "ast_data": "FunctionDef name:eval arg:self arg:session arguments arg arg Return return:yes Call"
  },
  {
    "library": "numpy",
    "name": "RelOp",
    "source_code": "class RelOp(Enum):\n    EQ = 1\n    NE = 2\n    LT = 3\n    LE = 4\n    GT = 5\n    GE = 6\n\n    @classmethod\n    def fromstring(cls, s, language=Language.C):\n        if language is Language.Fortran:\n            return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE, '.lt.': RelOp.LT, '.le.': RelOp.LE, '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]\n        return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT, '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]\n\n    def tostring(self, language=Language.C):\n        if language is Language.Fortran:\n            return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.', RelOp.LT: '.lt.', RelOp.LE: '.le.', RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]\n        return {RelOp.EQ: '==', RelOp.NE: '!=', RelOp.LT: '<', RelOp.LE: '<=', RelOp.GT: '>', RelOp.GE: '>='}[self]",
    "docstring": "Used in Op.RELATIONAL expression to specify the function part.",
    "type": "class",
    "file_path": "numpy\\numpy\\f2py\\symbolic.py",
    "ast_data": "ClassDef name:RelOp Assign Assign Assign Assign Assign Assign FunctionDef name:fromstring arg:cls arg:s arg:language arguments arg arg arg If Compare Return return:yes Call Return return:yes FunctionDef name:tostring arg:self arg:language arguments arg arg If Compare Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "generate_model_report",
    "source_code": "def generate_model_report(self, remove_inserted_observers: bool) -> dict[str, tuple[str, dict]]:\n    if not self._prepared_flag:\n        raise Exception('Cannot generate report without preparing model for callibration')\n    if self._removed_observers:\n        raise Exception('Cannot generate report on model you already removed observers from')\n    reports_of_interest = {}\n    for detector in self._desired_report_detectors:\n        report_output = detector.generate_detector_report(self._model)\n        reports_of_interest[detector.get_detector_name()] = report_output\n    if remove_inserted_observers:\n        self._removed_observers = True\n        all_observers_of_interest: set[str] = set()\n        for desired_report in self._detector_name_to_observer_fqns:\n            observers_of_interest = self._detector_name_to_observer_fqns[desired_report]\n            all_observers_of_interest.update(observers_of_interest)\n        for observer_fqn in all_observers_of_interest:\n            self._model.delete_submodule(observer_fqn)\n            node_obj = self._get_node_from_fqn(observer_fqn)\n            if node_obj:\n                self._model.graph.erase_node(node_obj)\n            else:\n                raise ValueError('Node no longer exists in GraphModule structure')\n        self._model.recompile()\n    saved_reports: dict[str, dict] = {report_name: report_tuple[1] for report_name, report_tuple in reports_of_interest.items()}\n    self._generated_reports = saved_reports\n    return reports_of_interest",
    "docstring": "Generates all the requested reports. Note: You should have callibrated the model with relevant data before calling this The reports generated are specified by the desired_reports specified in desired_reports Can optionally remove all the observers inserted by the ModelReport instance Args: remove_inserted_observers (bool): True to remove the observers inserted by this ModelReport instance Returns a mapping of each desired report name to a tuple with: The textual summary of that report information A dictionary containing relevant statistics or information for that report Note: Throws exception if we try to generate report on model we already removed observers from Throws exception if we try to generate report without preparing for callibration",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py",
    "ast_data": "FunctionDef name:generate_model_report arg:self arg:remove_inserted_observers arguments arg arg If Raise Call If Raise Call Assign For Assign Call Assign Call If Assign Call For Assign Call For Call Assign Call If Call Raise Call Call Call Assign Return return:yes"
  },
  {
    "library": "scipy",
    "name": "read_dtype",
    "source_code": "def read_dtype(mat_stream, a_dtype):\n    num_bytes = a_dtype.itemsize\n    arr = np.ndarray(shape=(), dtype=a_dtype, buffer=mat_stream.read(num_bytes), order='F')\n    return arr",
    "docstring": "Generic get of byte stream data of known type Parameters ---------- mat_stream : file_like object MATLAB (tm) mat file stream a_dtype : dtype dtype of array to read. is assumed to be correct endianness. Returns ------- arr : ndarray Array of dtype read from stream.",
    "type": "function",
    "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py",
    "ast_data": "FunctionDef name:read_dtype arg:mat_stream arg:a_dtype arguments arg arg Assign Assign Call Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_param_to_rank",
    "source_code": "@property\ndef _param_to_rank(self) -> dict[torch.Tensor, int]:\n    if len(self._param_to_rank_cache) == 0:\n        for rank, param_groups in enumerate(self._partition_parameters()):\n            for param_group in param_groups:\n                for param in param_group['params']:\n                    self._param_to_rank_cache[param] = rank\n    return self._param_to_rank_cache",
    "docstring": ":class: mapping parameters to their assigned data parallel rank in the partition.",
    "type": "method",
    "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py",
    "ast_data": "FunctionDef name:_param_to_rank arg:self arguments arg If Compare Call For Call Call For For Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "get_timestamped_export_dir",
    "source_code": "def get_timestamped_export_dir(export_dir_base):\n    attempts = 0\n    while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n        timestamp = int(time.time())\n        result_dir = file_io.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp)))\n        if not gfile.Exists(result_dir):\n            return result_dir\n        time.sleep(1)\n        attempts += 1\n        logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))\n    raise RuntimeError(f'Failed to obtain a unique export directory name after {MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')",
    "docstring": "Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py",
    "ast_data": "FunctionDef name:get_timestamped_export_dir arg:export_dir_base arguments arg Assign While Compare Assign Call Call Assign Call Call Call Call If Call Return return:yes Call Call Call Call Raise Call"
  },
  {
    "library": "matplotlib",
    "name": "default_units",
    "source_code": "@staticmethod\ndef default_units(x, axis):\n    return None",
    "docstring": "Return the default unit for *x* or `` for the given axis.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\units.py",
    "ast_data": "FunctionDef name:default_units arg:x arg:axis arguments arg arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_infer_column_defaults",
    "source_code": "def _infer_column_defaults(filenames, num_cols, field_delim, use_quote_delim, na_value, header, num_rows_for_inference, select_columns, file_io_fn):\n    if select_columns is None:\n        select_columns = range(num_cols)\n    inferred_types = [None] * len(select_columns)\n    for i, csv_row in enumerate(_next_csv_row(filenames, num_cols, field_delim, use_quote_delim, header, file_io_fn)):\n        if num_rows_for_inference is not None and i >= num_rows_for_inference:\n            break\n        for j, col_index in enumerate(select_columns):\n            inferred_types[j] = _infer_type(csv_row[col_index], na_value, inferred_types[j])\n    inferred_types = [t or dtypes.string for t in inferred_types]\n    return [constant_op.constant([0 if t is not dtypes.string else ''], dtype=t) for t in inferred_types]",
    "docstring": "Infers column types from the first N valid CSV records of files.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "FunctionDef name:_infer_column_defaults arg:filenames arg:num_cols arg:field_delim arg:use_quote_delim arg:na_value arg:header arg:num_rows_for_inference arg:select_columns arg:file_io_fn arguments arg arg arg arg arg arg arg arg arg If Compare Assign Call Assign Call For Call Call If BoolOp Compare Compare For Call Assign Call Assign BoolOp Return return:yes Call Compare"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, copy_from=None, state=None, alg=None):\n    if distribute_lib.has_strategy():\n        self._distribution_strategy = distribute_lib.get_strategy()\n    else:\n        self._distribution_strategy = None\n    if copy_from is not None:\n        assert (alg or state) is None\n        self._state_var = self._create_variable(copy_from.state, dtype=STATE_TYPE, trainable=False)\n        self._alg = copy_from.algorithm\n    else:\n        assert alg is not None and state is not None\n        alg = random_ops_util.convert_alg_to_int(alg)\n        if isinstance(state, variables.Variable):\n            _check_state_shape(state.shape, alg)\n            self._state_var = state\n        else:\n            state = _convert_to_state_tensor(state)\n            _check_state_shape(state.shape, alg)\n            self._state_var = self._create_variable(state, dtype=STATE_TYPE, trainable=False)\n        self._alg = alg",
    "docstring": "Creates a generator. The new generator will be initialized by one of the following ways, with decreasing precedence: (1) If is not None, the new generator is initialized by copying information from another generator. (2) If and are not None (they must be set together), the new generator is initialized by a state. Args: copy_from: a generator to be copied from. state: a vector of dtype STATE_TYPE representing the initial state of the RNG, whose length and semantics are algorithm-specific. If it's a variable, the generator will reuse it instead of creating a new variable. alg: the RNG algorithm. Possible values are for the Philox algorithm and for the ThreeFry algorithm (see paper 'Parallel Random Numbers: As Easy as 1, 2, 3' [ The string names and can also be used. Note guarantees the same numbers are produced (given the same random state) across all architectures (CPU, GPU, XLA etc).",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:copy_from arg:state arg:alg arguments arg arg arg arg If Call Assign Call Assign If Compare Compare BoolOp Assign Call Assign BoolOp Compare Compare Assign Call If Call Call Assign Assign Call Call Assign Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):\n    self.default_font_prop = default_font_prop\n    self.load_glyph_flags = load_glyph_flags",
    "docstring": "Parameters ---------- default_font_prop : The default non-math font, or the base font for Unicode (generic) font rendering. load_glyph_flags : Flags passed to the glyph loader (e.g. `` for FreeType-based fonts).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:default_font_prop arg:load_glyph_flags arguments arg arg arg Assign Assign"
  },
  {
    "library": "numpy",
    "name": "HermiteE",
    "source_code": "class HermiteE(ABCPolyBase):\n    _add = staticmethod(hermeadd)\n    _sub = staticmethod(hermesub)\n    _mul = staticmethod(hermemul)\n    _div = staticmethod(hermediv)\n    _pow = staticmethod(hermepow)\n    _val = staticmethod(hermeval)\n    _int = staticmethod(hermeint)\n    _der = staticmethod(hermeder)\n    _fit = staticmethod(hermefit)\n    _line = staticmethod(hermeline)\n    _roots = staticmethod(hermeroots)\n    _fromroots = staticmethod(hermefromroots)\n    domain = np.array(hermedomain)\n    window = np.array(hermedomain)\n    basis_name = 'He'",
    "docstring": "An HermiteE series class. The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed below. Parameters ---------- coef : array_like HermiteE coefficients in order of increasing degree, i.e, `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. .. versionadded:: 1.24",
    "type": "class",
    "file_path": "numpy\\numpy\\polynomial\\hermite_e.py",
    "ast_data": "ClassDef name:HermiteE Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign Call Assign"
  },
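A short sketch of the evaluation and arithmetic that the staticmethod table above wires up; the coefficient values are illustrative only:

```python
import numpy as np
from numpy.polynomial import HermiteE

# p(x) = 1*He_0(x) + 2*He_1(x) + 3*He_2(x), with He_2(x) = x**2 - 1.
p = HermiteE([1, 2, 3])
print(p(0.0))           # 1 + 0 + 3*(-1) = -2.0, via _val -> hermeval
print((p + p).coef)     # [2. 4. 6.], via _add -> hermeadd
print(p.deriv().coef)   # derivative coefficients, via _der -> hermeder
```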
  {
    "library": "tensorflow",
    "name": "pending_exits_count",
    "source_code": "@property\ndef pending_exits_count(self):\n    return self._pending_exits_count",
    "docstring": "The number of exits we expect to see but haven't.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py",
    "ast_data": "FunctionDef name:pending_exits_count arg:self arguments arg Return return:yes"
  },
  {
    "library": "kornia",
    "name": "pad",
    "source_code": "def pad(self, padding_size: Tensor) -> 'Keypoints3D':\n    raise NotImplementedError",
    "docstring": "Pad a bounding keypoints. Args: padding_size: (B, 6)",
    "type": "method",
    "file_path": "kornia\\kornia\\geometry\\keypoints.py",
    "ast_data": "FunctionDef name:pad arg:self arg:padding_size arguments arg arg Raise"
  },
  {
    "library": "pytorch",
    "name": "key_get",
    "source_code": "def key_get(obj: Any, kp: KeyPath) -> Any:\n    for k in kp:\n        obj = k.get(obj)\n    return obj",
    "docstring": "Given an object and a key path, return the value at the key path.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\_pytree.py",
    "ast_data": "FunctionDef name:key_get arg:obj arg:kp arguments arg arg For Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "is_compiling",
    "source_code": "def is_compiling() -> bool:\n    return torch.compiler.is_compiling()",
    "docstring": "Indicates whether we are tracing/compiling with torch.compile() or torch.export().",
    "type": "function",
    "file_path": "pytorch\\torch\\_dynamo\\external_utils.py",
    "ast_data": "FunctionDef name:is_compiling arguments Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "def transform(self, raw_documents):\n    if isinstance(raw_documents, str):\n        raise ValueError('Iterable over raw text documents expected, string object received.')\n    self._check_vocabulary()\n    _, X = self._count_vocab(raw_documents, fixed_vocab=True)\n    if self.binary:\n        X.data.fill(1)\n    return X",
    "docstring": "Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py",
    "ast_data": "FunctionDef name:transform arg:self arg:raw_documents arguments arg arg If Call Raise Call Call Assign Call If Call Return return:yes"
  },
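A brief usage sketch for the method above, showing the fitted-vocabulary reuse and the `binary` branch; the documents are made up for illustration:

```python
from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat", "the cat sat on the mat"]
vec = CountVectorizer().fit(docs)

# transform() reuses the fitted vocabulary (fixed_vocab=True internally);
# passing a bare string instead of an iterable raises ValueError.
X = vec.transform(["the mat"])
print(X.toarray())

# With binary=True, nonzero counts are overwritten with 1.
vec_bin = CountVectorizer(binary=True).fit(docs)
print(vec_bin.transform(["the the the"]).toarray())
```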
  {
    "library": "pytorch",
    "name": "find_device_for",
    "source_code": "def find_device_for(partition: Partition):\n    for d in device_to_left_mem_bytes:\n        extra_size_needed = calculate_extra_mem_bytes_needed_for(partition, device_to_partitions[d])\n        if extra_size_needed < device_to_left_mem_bytes[d]:\n            device_to_partitions[d].append(partition)\n            partition.logical_device_ids.append(d.logical_id)\n            device_to_left_mem_bytes[d] -= extra_size_needed\n            return True\n    return False",
    "docstring": "Given a partition, find a logical device for the partition The algorithm is to put the partition on the device that has just enough mem left for that partition. device_to_left_mem_bytes is a dictionary between device and its left mem size sorted by its left mem size",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py",
    "ast_data": "FunctionDef name:find_device_for arg:partition arguments arg For Assign Call If Compare Call Call Return return:yes Return return:yes"
  },
  {
    "library": "scipy",
    "name": "time_dist_weighted",
    "source_code": "def time_dist_weighted(self, metric):\n    getattr(distance, self.metric)(self.points[0], self.points[1], **self.kwargs)",
    "docstring": "Time weighted distance metrics individually (without batching with cdist or pdist).",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py",
    "ast_data": "FunctionDef name:time_dist_weighted arg:self arg:metric arguments arg arg Call Call"
  },
  {
    "library": "scipy",
    "name": "assert_array_almost_equal",
    "source_code": "def assert_array_almost_equal(actual, desired, decimal=6, *args, **kwds):\n    rtol, atol = (0, 1.5 * 10 ** (-decimal))\n    return xp_assert_close(actual, desired, *args, atol=atol, rtol=rtol, check_dtype=False, check_shape=False, **kwds)",
    "docstring": "Backwards compatible replacement. In new code, use xp_assert_close instead.",
    "type": "function",
    "file_path": "scipy\\scipy\\_lib\\_array_api.py",
    "ast_data": "FunctionDef name:assert_array_almost_equal arg:actual arg:desired arg:decimal arguments arg arg arg arg arg Assign Return return:yes Call"
  },
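The decimal-to-tolerance mapping the shim applies, illustrated here with NumPy's public `assert_allclose` rather than the SciPy-internal `xp_assert_close`:

```python
import numpy as np

decimal = 6
rtol, atol = 0, 1.5 * 10 ** (-decimal)   # the shim's mapping: absolute-only

a = np.array([1.0, 2.0])
b = a + 1e-7                              # error below atol = 1.5e-6
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)  # passes
```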
  {
    "library": "matplotlib",
    "name": "nonsingular",
    "source_code": "def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):\n    if not np.isfinite(vmin) or not np.isfinite(vmax):\n        return (-expander, expander)\n    swapped = False\n    if vmax < vmin:\n        vmin, vmax = (vmax, vmin)\n        swapped = True\n    vmin, vmax = map(float, [vmin, vmax])\n    maxabsvalue = max(abs(vmin), abs(vmax))\n    if maxabsvalue < 1000000.0 / tiny * np.finfo(float).tiny:\n        vmin = -expander\n        vmax = expander\n    elif vmax - vmin <= maxabsvalue * tiny:\n        if vmax == 0 and vmin == 0:\n            vmin = -expander\n            vmax = expander\n        else:\n            vmin -= expander * abs(vmin)\n            vmax += expander * abs(vmax)\n    if swapped and (not increasing):\n        vmin, vmax = (vmax, vmin)\n    return (vmin, vmax)",
    "docstring": "Modify the endpoints of a range as needed to avoid singularities. Parameters ---------- vmin, vmax : float The initial endpoints. expander : float, default: 0.001 Fractional amount by which *vmin* and *vmax* are expanded if the original interval is too small, based on *tiny*. tiny : float, default: 1e-15 Threshold for the ratio of the interval to the maximum absolute value of its endpoints. If the interval is smaller than this, it will be expanded. This value should be around 1e-15 or larger; otherwise the interval will be approaching the double precision resolution limit. increasing : bool, default: True If True, swap *vmin*, *vmax* if *vmin* > *vmax*. Returns ------- vmin, vmax : float Endpoints, expanded and/or swapped if necessary. If either input is inf or NaN, or if both inputs are 0 or very close to zero, it returns -*expander*, *expander*.",
    "type": "function",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:nonsingular arg:vmin arg:vmax arg:expander arg:tiny arg:increasing arguments arg arg arg arg arg If BoolOp Call Call Return return:yes Assign If Compare Assign Assign Assign Call Assign Call Call Call If Compare Call Assign Assign If Compare If BoolOp Compare Compare Assign Assign Call Call If BoolOp Assign Return return:yes"
  },
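A few calls exercising the branches documented above (degenerate interval, tiny interval, swapped endpoints, non-finite input):

```python
from matplotlib.transforms import nonsingular

print(nonsingular(0, 0))              # both endpoints zero -> (-0.001, 0.001)
print(nonsingular(1.0, 1.0))          # tiny interval expanded -> (0.999, 1.001)
print(nonsingular(3, 1))              # swapped back to increasing -> (1.0, 3.0)
print(nonsingular(float("nan"), 1))   # non-finite input -> (-0.001, 0.001)
```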
  {
    "library": "pandas",
    "name": "metadata",
    "source_code": "@property\ndef metadata(self) -> dict[str, pd.Index]:\n    return {'pandas.index': self._col.index}",
    "docstring": "Store specific metadata of the column.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\interchange\\column.py",
    "ast_data": "FunctionDef name:metadata arg:self arguments arg Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "get_heights",
    "source_code": "def get_heights(self):\n    return self._heights * 2",
    "docstring": "Set the lengths of second axes (e.g., minor axes).",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\collections.py",
    "ast_data": "FunctionDef name:get_heights arg:self arguments arg Return return:yes"
  },
  {
    "library": "cherrypy",
    "name": "_apply",
    "source_code": "def _apply(self, config):\n    if isinstance(config.get('global'), dict):\n        if len(config) > 1:\n            cherrypy.checker.global_config_contained_paths = True\n        config = config['global']\n    if 'tools.staticdir.dir' in config:\n        config['tools.staticdir.section'] = 'global'\n    super(Config, self)._apply(config)",
    "docstring": "Update self from a dict.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\_cpconfig.py",
    "ast_data": "FunctionDef name:_apply arg:self arg:config arguments arg arg If Call Call If Compare Call Assign Assign If Compare Assign Call Call"
  },
  {
    "library": "sphinx",
    "name": "ApplicationError",
    "source_code": "class ApplicationError(SphinxError):\n    category = 'Application error'",
    "docstring": "Application initialization error.",
    "type": "class",
    "file_path": "sphinx\\sphinx\\errors.py",
    "ast_data": "ClassDef name:ApplicationError Assign"
  },
  {
    "library": "scipy",
    "name": "to_ss",
    "source_code": "def to_ss(self):\n    return StateSpace(*tf2ss(self.num, self.den), **self._dt_dict)",
    "docstring": "Convert system representation to . Returns ------- sys : instance of State space model of the current system",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_ltisys.py",
    "ast_data": "FunctionDef name:to_ss arg:self arguments arg Return return:yes Call Call"
  },
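A minimal conversion sketch matching the one-liner above; the transfer function is an arbitrary example:

```python
from scipy.signal import TransferFunction

# H(s) = 1 / (s^2 + 2s + 1), converted to a state-space realization via tf2ss.
tf_sys = TransferFunction([1], [1, 2, 1])
ss_sys = tf_sys.to_ss()
print(ss_sys.A)                      # state matrix produced by tf2ss
print(ss_sys.B, ss_sys.C, ss_sys.D)
```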
  {
    "library": "tensorflow",
    "name": "__call__",
    "source_code": "def __call__(self):\n    raise NotImplementedError('must be implemented in descendants')",
    "docstring": "Perform one step of this training algorithm.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\step_fn.py",
    "ast_data": "FunctionDef name:__call__ arg:self arguments arg Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_CUDAAllocator",
    "source_code": "class _CUDAAllocator:\n\n    def __init__(self, allocator: torch._C._cuda_CUDAAllocator):\n        self._allocator = allocator\n\n    def allocator(self):\n        return self._allocator",
    "docstring": "Wrapper over internal CUDA memory allocators.",
    "type": "class",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "ClassDef name:_CUDAAllocator FunctionDef name:__init__ arg:self arg:allocator arguments arg arg Assign FunctionDef name:allocator arg:self arguments arg Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "cache_clear",
    "source_code": "@classmethod\ndef cache_clear(cls) -> None:\n    cls.cache_hits = 0\n    cls.cache_misses = 0\n    cls.cache_bypasses.clear()\n    cls.cache.clear()",
    "docstring": "Clear the dispatch cache.",
    "type": "method",
    "file_path": "pytorch\\torch\\_subclasses\\fake_tensor.py",
    "ast_data": "FunctionDef name:cache_clear arg:cls arguments arg Assign Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "read_string",
    "source_code": "def read_string(self, name: str) -> str:\n    data = self.read_bytes(name)\n    return data.decode()",
    "docstring": "Read a string object from the archive. name: The source file inside the archive.",
    "type": "method",
    "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py",
    "ast_data": "FunctionDef name:read_string arg:self arg:name arguments arg arg Assign Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_add_ragged_feature",
    "source_code": "def _add_ragged_feature(self, key, feature):\n    value_key = key if feature.value_key is None else feature.value_key\n    self._add_ragged_key(value_key, feature.dtype, feature.row_splits_dtype)\n    for partition in feature.partitions:\n        if not isinstance(partition, RaggedFeature.UniformRowLength):\n            self._add_ragged_key(partition.key, dtypes.int64, feature.row_splits_dtype)",
    "docstring": "Adds a RaggedFeature.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\parsing_config.py",
    "ast_data": "FunctionDef name:_add_ragged_feature arg:self arg:key arg:feature arguments arg arg arg Assign Compare Call For If Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_clone_layers_and_model_config",
    "source_code": "def _clone_layers_and_model_config(model, input_layers, layer_fn):\n    created_layers = {}\n\n    def _copy_layer(layer):\n        if layer in input_layers:\n            created_layers[layer.name] = input_layers[layer]\n        elif layer in model._input_layers:\n            created_layers[layer.name] = InputLayer(**layer.get_config())\n        else:\n            created_layers[layer.name] = layer_fn(layer)\n        return {}\n    config = functional.get_network_config(model, serialize_layer_fn=_copy_layer)\n    return (config, created_layers)",
    "docstring": "Clones all layers, and returns the model config without serializing layers. This function ensures that only the node graph is retrieved when getting the model config. The used to clone layers might not rely on , so some custom layers do not define . Trying to retrieve the config results in errors. Args: model: A Functional model. input_layers: Dictionary mapping input layers in to new input layers layer_fn: Function used to clone all non-input layers. Returns: Model config object, and a dictionary of newly created layers.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\models.py",
    "ast_data": "FunctionDef name:_clone_layers_and_model_config arg:model arg:input_layers arg:layer_fn arguments arg arg arg Assign FunctionDef name:_copy_layer arg:layer arguments arg If Compare Assign If Compare Assign Call Call Assign Call Return return:no Assign Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "delta_f",
    "source_code": "@property\ndef delta_f(self) -> float:\n    return 1 / (self.mfft * self.T)",
    "docstring": "Width of the frequency bins of the STFT. Return the frequency interval = 1 / ( * ). See Also -------- delta_t: Time increment of STFT. f_pts: Number of points along the frequency axis. f: Frequencies values of the STFT. mfft: Length of the input for FFT used. T: Sampling interval. t: Times of STFT for an input signal with samples. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:delta_f arg:self arguments arg Return return:yes"
  },
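A quick numeric check of the formula in the property above; the window, hop, and sampling rate are arbitrary choices:

```python
from scipy.signal import ShortTimeFFT
from scipy.signal.windows import hann

SFT = ShortTimeFFT(hann(128), hop=64, fs=1000.0)
# delta_f = 1 / (mfft * T) = fs / mfft; mfft defaults to the window length.
print(SFT.delta_f)            # 1000 / 128 = 7.8125
print(SFT.fs / SFT.mfft)      # same value
```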
  {
    "library": "tensorflow",
    "name": "_write_object_proto",
    "source_code": "def _write_object_proto(self, proto, options):\n    write_object_proto_for_resource_variable(self, proto, options)",
    "docstring": "Writes additional information of the variable into the SavedObject proto. Subclasses of ResourceVariables could choose to override this method to customize extra information to provide when saving a SavedModel. Ideally, this should contain the logic in write_object_proto_for_resource_variable but is an outlier at the momemnt. Once becomes a proper ResourceVariable, we should remove the helper method below. Args: proto: proto to update. options: A instance that configures save behavior.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_write_object_proto arg:self arg:proto arg:options arguments arg arg arg Call"
  },
  {
    "library": "scipy",
    "name": "fresnel_zeros",
    "source_code": "def fresnel_zeros(nt):\n    if floor(nt) != nt or nt <= 0 or (not isscalar(nt)):\n        raise ValueError('Argument must be positive scalar integer.')\n    return (_specfun.fcszo(2, nt), _specfun.fcszo(1, nt))",
    "docstring": "Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z). Parameters ---------- nt : int Number of zeros to compute Returns ------- zeros_sine: ndarray Zeros of the sine Fresnel integral zeros_cosine : ndarray Zeros of the cosine Fresnel integral References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special Functions\", John Wiley and Sons, 1996.",
    "type": "function",
    "file_path": "scipy\\scipy\\special\\_basic.py",
    "ast_data": "FunctionDef name:fresnel_zeros arg:nt arguments arg If BoolOp Compare Call Compare Call Raise Call Return return:yes Call Call"
  },
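A sanity check that the returned complex points are indeed zeros of the Fresnel integrals (SciPy's `fresnel` accepts complex input):

```python
from scipy.special import fresnel, fresnel_zeros

zeros_sine, zeros_cosine = fresnel_zeros(3)
S, _ = fresnel(zeros_sine)      # S(z) ~ 0 at the returned points
_, C = fresnel(zeros_cosine)    # C(z) ~ 0 likewise
print(abs(S).max(), abs(C).max())   # both near machine precision
```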
  {
    "library": "pytorch",
    "name": "script_lnlstm",
    "source_code": "def script_lnlstm(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=False, bidirectional=False, decompose_layernorm=False):\n    assert bias\n    assert not batch_first\n    assert not dropout\n    if bidirectional:\n        stack_type = StackedLSTM2\n        layer_type = BidirLSTMLayer\n        dirs = 2\n    else:\n        stack_type = StackedLSTM\n        layer_type = LSTMLayer\n        dirs = 1\n    return stack_type(num_layers, layer_type, first_layer_args=[LayerNormLSTMCell, input_size, hidden_size, decompose_layernorm], other_layer_args=[LayerNormLSTMCell, hidden_size * dirs, hidden_size, decompose_layernorm])",
    "docstring": "Returns a ScriptModule that mimics a PyTorch native LSTM.",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\fastrnns\\custom_lstms.py",
    "ast_data": "FunctionDef name:script_lnlstm arg:input_size arg:hidden_size arg:num_layers arg:bias arg:batch_first arg:dropout arg:bidirectional arg:decompose_layernorm arguments arg arg arg arg arg arg arg arg If Assign Assign Assign Assign Assign Assign Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "statically_known_list_equals",
    "source_code": "def statically_known_list_equals(self, left: list[Expr], right: list[Expr]) -> bool:\n    return len(left) == len(right) and all((self.statically_known_equals(l, r) for l, r in zip(left, right)))",
    "docstring": "Returns a bool indicating if it is sound to optimize as if left and right lists are equal.",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\sizevars.py",
    "ast_data": "FunctionDef name:statically_known_list_equals arg:self arg:left arg:right arguments arg arg arg Return return:yes BoolOp Compare Call Call Call Call Call"
  },
  {
    "library": "cherrypy",
    "name": "__init__",
    "source_code": "def __init__(self, id=None, **kwargs):\n    self.id_observers = []\n    self._data = {}\n    for k, v in kwargs.items():\n        setattr(self, k, v)\n    self.originalid = id\n    self.missing = False\n    if id is None:\n        if self.debug:\n            cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')\n        self._regenerate()\n    else:\n        self.id = id\n        if self._exists():\n            if self.debug:\n                cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')\n        else:\n            if self.debug:\n                cherrypy.log('Expired or malicious session %r; making a new one' % id, 'TOOLS.SESSIONS')\n            self.id = None\n            self.missing = True\n            self._regenerate()",
    "docstring": "Initialize the session tool.",
    "type": "method",
    "file_path": "cherrypy\\cherrypy\\lib\\sessions.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:id arguments arg arg arg Assign Assign For Call Call Assign Assign If Compare If Call Call Assign If Call If Call If Call Assign Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "emit_counters",
    "source_code": "def emit_counters(self, category, name, pid, timestamp, counters):\n    event = self._create_event('C', category, name, pid, 0, timestamp)\n    event['args'] = counters.copy()\n    self._events.append(event)",
    "docstring": "Emits a counter record for the dictionary 'counters'. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counters: Dictionary of counter values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py",
    "ast_data": "FunctionDef name:emit_counters arg:self arg:category arg:name arg:pid arg:timestamp arg:counters arguments arg arg arg arg arg arg Assign Call Assign Call Call"
  },
  {
    "library": "tensorflow",
    "name": "_make_execution_function",
    "source_code": "def _make_execution_function(model, mode):\n    if model._distribution_strategy:\n        return distributed_training_utils_v1._make_execution_function(model, mode)\n    return model._make_execution_function(mode)",
    "docstring": "Makes function to run one step of model execution.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_arrays_v1.py",
    "ast_data": "FunctionDef name:_make_execution_function arg:model arg:mode arguments arg arg If Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "__init__",
    "source_code": "def __init__(self, root, attached_dependencies=None):\n    trackable_view.TrackableView.__init__(self, root)\n    self._root_ref = root if isinstance(root, weakref.ref) else weakref.ref(root)\n    self._attached_dependencies = attached_dependencies",
    "docstring": "Configure the graph view. Args: root: A object whose variables (including the variables of dependencies, recursively) should be saved. May be a weak reference. attached_dependencies: List of dependencies to attach to the root object. Used when saving a Checkpoint with a defined root object. To avoid reference cycles, this should use the WeakTrackableReference class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:root arg:attached_dependencies arguments arg arg arg Call Assign Call Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "def fit(self, X, y, sample_weight=None, **params):\n    return super().fit(X, y, sample_weight=sample_weight, **params)",
    "docstring": "Fit Lasso model with coordinate descent. Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If y is mono-output, X can be sparse. Note that large sparse matrices and arrays requiring indices are not accepted. y : array-like of shape (n_samples,) Target values. sample_weight : float or array-like of shape (n_samples,), default=None Sample weights used for fitting and evaluation of the weighted mean squared error of each cv-fold. Note that the cross validated MSE that is finally used to find the best model is the unweighted mean over the (weighted) MSEs of each test fold. **params : dict, default=None Parameters to be passed to the CV splitter. .. versionadded:: 1.4 Only available if , which can be set by using `Metadata Routing User Guide ` for more details. Returns ------- self : object Returns an instance of fitted model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_coordinate_descent.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "summary_writer_initializer_op",
    "source_code": "def summary_writer_initializer_op():\n    if context.executing_eagerly():\n        raise RuntimeError('tf.contrib.summary.summary_writer_initializer_op is only supported in graph mode.')\n    return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)",
    "docstring": "Graph-mode only. Returns the list of ops to create all summary writers. Returns: The initializer ops. Raises: RuntimeError: If in Eager mode.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py",
    "ast_data": "FunctionDef name:summary_writer_initializer_op arguments If Call Raise Call Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "localize_nodes",
    "source_code": "def localize_nodes(self, nodes: list[ir.IRNode], rewrite_index: Callable[['LocalizeBufferHandler', sympy.Expr, str], sympy.Expr]=rewrite_index_for_nodes) -> list[ir.IRNode]:\n    assert len(nodes) > 0\n\n    def wrap_inner_fn_for_node(node: ir.IRNode):\n        loops = node.data if isinstance(node, ir.ComputedBuffer) else node\n        assert isinstance(loops, ir.Loops)\n        new_inner_fn = self.localize_function(loops.inner_fn, rewrite_index)\n        new_loops = dataclasses.replace(loops, inner_fn=new_inner_fn)\n        if isinstance(node, ir.ComputedBuffer):\n            new_node = ir.ComputedBuffer(name=node.get_name(), layout=node.get_layout(), data=new_loops)\n        else:\n            new_node = new_loops\n        return new_node\n    return [wrap_inner_fn_for_node(node) for node in nodes]",
    "docstring": "Given and registered in current though the method of , localizes the to for the given and returns a new list of IR nodes that work on instead of , i.e., all the loads and stores are redirected to . This helps the fused loops to work on smaller-sized local buffers for better data locality. The the data access of is assumed to be contiguous with the same order as the .",
    "type": "method",
    "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_utils.py",
    "ast_data": "FunctionDef name:localize_nodes arg:self arg:nodes arg:rewrite_index arguments arg arg arg Compare Call FunctionDef name:wrap_inner_fn_for_node arg:node arguments arg Assign Call Call Assign Call Assign Call If Call Assign Call Call Call Assign Return return:yes Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "get_iterator",
    "source_code": "def get_iterator(dataset):\n    if context.executing_eagerly():\n        iterator = dataset_ops.make_one_shot_iterator(dataset)\n    else:\n        iterator = dataset_ops.make_initializable_iterator(dataset)\n    initialize_iterator(iterator)\n    return iterator",
    "docstring": "Create and initialize an iterator from a dataset.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py",
    "ast_data": "FunctionDef name:get_iterator arg:dataset arguments arg If Call Assign Call Assign Call Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "__call__",
    "source_code": "def __call__(self, num: float) -> str:\n    dnum = Decimal(str(num))\n    if Decimal.is_nan(dnum):\n        return 'NaN'\n    if Decimal.is_infinite(dnum):\n        return 'inf'\n    sign = 1\n    if dnum < 0:\n        sign = -1\n        dnum = -dnum\n    if dnum != 0:\n        pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))\n    else:\n        pow10 = Decimal(0)\n    pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))\n    pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))\n    int_pow10 = int(pow10)\n    if self.use_eng_prefix:\n        prefix = self.ENG_PREFIXES[int_pow10]\n    elif int_pow10 < 0:\n        prefix = f'E-{-int_pow10:02d}'\n    else:\n        prefix = f'E+{int_pow10:02d}'\n    mant = sign * dnum / 10 ** pow10\n    if self.accuracy is None:\n        format_str = '{mant: g}{prefix}'\n    else:\n        format_str = f'{{mant: .{self.accuracy:d}f}}{{prefix}}'\n    formatted = format_str.format(mant=mant, prefix=prefix)\n    return formatted",
    "docstring": "Formats a number in engineering notation, appending a letter representing the power of 1000 of the original number. Some examples: >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True) >>> format_eng(0) ' 0' >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True) >>> format_eng(1_000_000) ' 1.0M' >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False) >>> format_eng(\"-1e-6\") '-1.00E-06' @param num: the value to represent @type num: either a numeric value or a string that can be converted to a numeric value (as per decimal.Decimal constructor) @return: engineering formatted string",
    "type": "method",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:num arguments arg arg Assign Call Call If Call Return return:yes If Call Return return:yes Assign If Compare Assign Assign If Compare Assign Call Call Call Call Assign Call Assign Call Call Call Assign Call Call Call Assign Call If Assign If Compare Assign Assign Assign If Compare Assign Assign Assign Call Return return:yes"
  },
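A small usage sketch tracing the arithmetic in the method above; note the import path is the internal module named in `file_path`, not a public pandas API:

```python
from pandas.io.formats.format import EngFormatter  # internal path, per file_path above

fmt = EngFormatter(accuracy=1, use_eng_prefix=True)
print(fmt(1_000_000))   # ' 1.0M'  (pow10 = floor(6/3)*3 = 6 -> 'M')
print(fmt(0.00002))     # ' 20.0u' (pow10 = floor(-4.7/3)*3 = -6 -> 'u')

fmt_e = EngFormatter(accuracy=2, use_eng_prefix=False)
print(fmt_e("-1e-6"))   # '-1.00E-06', matching the docstring example
```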
  {
    "library": "tensorflow",
    "name": "_shape_invariant_to_type_spec",
    "source_code": "def _shape_invariant_to_type_spec(self, shape):\n    raise NotImplementedError(f'{type(self).__name__}._shape_invariant_to_type_spec')",
    "docstring": "Returns a TypeSpec given a shape invariant (used by ). Args: shape: A object. The shape invariant for this , or if a default shape invariant should be used (based on the value of this ). Returns: A nested structure whose values are objects, specifying the shape invariants for the tensors that comprise this .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor.py",
    "ast_data": "FunctionDef name:_shape_invariant_to_type_spec arg:self arg:shape arguments arg arg Raise Call Call"
  },
  {
    "library": "scikit-learn",
    "name": "_check_X",
    "source_code": "def _check_X(self, X):\n    X = validate_data(self, X, dtype='int', accept_sparse=False, ensure_all_finite=True, reset=False)\n    check_non_negative(X, 'CategoricalNB (input X)')\n    return X",
    "docstring": "Validate X, used only in predict* methods.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\naive_bayes.py",
    "ast_data": "FunctionDef name:_check_X arg:self arg:X arguments arg arg Assign Call Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "return_outputs_and_add_losses",
    "source_code": "def return_outputs_and_add_losses(*args, **kwargs):\n    if return_method:\n        args = args[1:]\n    outputs, losses = fn(*args, **kwargs)\n    layer.add_loss(losses, inputs=True)\n    if context.executing_eagerly():\n        for i in layer._flatten_layers():\n            if i is not layer:\n                i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER]\n    return outputs",
    "docstring": "Returns the outputs from the layer call function, and adds the losses.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py",
    "ast_data": "FunctionDef name:return_outputs_and_add_losses arguments arg arg If Assign Assign Call Call If Call For Call If Compare Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "cluster_resolver",
    "source_code": "@property\ndef cluster_resolver(self):\n    return self.extended._tpu_cluster_resolver",
    "docstring": "Returns the cluster resolver associated with this strategy. provides the associated . If the user provides one in , that instance is returned; if the user does not, a default is provided.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py",
    "ast_data": "FunctionDef name:cluster_resolver arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "variables_initializer",
    "source_code": "@tf_export(v1=['initializers.variables', 'variables_initializer'])\ndef variables_initializer(var_list, name='init'):\n    if var_list and (not context.executing_eagerly()):\n        return control_flow_ops.group(*[v.initializer for v in var_list], name=name)\n    return control_flow_ops.no_op(name=name)",
    "docstring": "Returns an Op that initializes a list of variables. After you launch the graph in a session, you can run the returned Op to initialize all the variables in . This Op runs all the initializers of the variables in in parallel. Calling is equivalent to passing the list of initializers to . If is empty, however, the function still returns an Op that can be run. That Op just has no effect. @compatibility(TF2) In TF2, variables are initialized immediately when they are created. There is no longer a need to run variable initializers before using them. @end_compatibility Args: var_list: List of objects to initialize. name: Optional name for the returned operation. Returns: An Op that run the initializers of all the specified variables.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py",
    "ast_data": "FunctionDef name:variables_initializer arg:var_list arg:name arguments arg arg If BoolOp Call Return return:yes Call Return return:yes Call Call"
  },
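Since this API only matters in graph mode, a minimal TF1-style sketch under `tf.compat.v1` (variable values are illustrative):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()   # graph mode, where initializers matter

v = tf.Variable([1.0, 2.0])
w = tf.Variable(3.0)
init = tf.variables_initializer([v, w])   # groups both initializer ops

with tf.Session() as sess:
    sess.run(init)
    print(sess.run(v), sess.run(w))
```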
  {
    "library": "pytorch",
    "name": "wrap_storage_to",
    "source_code": "def wrap_storage_to(self, device=None, non_blocking=False):\n    device_idx = _normalization_device(custom_backend_name, device)\n    if getattr(self, f'is_{custom_backend_name}'):\n        if self.get_device() == device_idx:\n            return self\n    if self.is_sparse:\n        raise RuntimeError(f'Can not support a sparse storage move to {custom_backend_name} backend')\n    untyped_storage = torch.UntypedStorage(self.size(), device=torch.device(f'{custom_backend_name}:{device_idx}'))\n    untyped_storage.copy_(self, non_blocking)\n    return untyped_storage",
    "docstring": "Return a copy of this object in custom device memory. If this object is already in device memory and on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination device id. Defaults to the current device. non_blocking (bool): If `` and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\backend_registration.py",
    "ast_data": "FunctionDef name:wrap_storage_to arg:self arg:device arg:non_blocking arguments arg arg arg Assign Call If Call If Compare Call Return return:yes If Raise Call Assign Call Call Call Call Return return:yes"
  },
  {
    "library": "kornia",
    "name": "rgb_to_yuv",
    "source_code": "def rgb_to_yuv(image: Tensor) -> Tensor:\n    if not isinstance(image, Tensor):\n        raise TypeError(f'Input type is not a Tensor. Got {type(image)}')\n    if len(image.shape) < 3 or image.shape[-3] != 3:\n        raise ValueError(f'Input size must have a shape of (*, 3, H, W). Got {image.shape}')\n    r: Tensor = image[..., 0, :, :]\n    g: Tensor = image[..., 1, :, :]\n    b: Tensor = image[..., 2, :, :]\n    y: Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n    u: Tensor = -0.147 * r - 0.289 * g + 0.436 * b\n    v: Tensor = 0.615 * r - 0.515 * g - 0.1 * b\n    out: Tensor = torch.stack([y, u, v], -3)\n    return out",
    "docstring": "Convert an RGB image to YUV. .. image:: _static/img/rgb_to_yuv.png The image data is assumed to be in the range of :math:. The range of the output is of :math: to luma and the ranges of U and V are :math: and :math:, respectively. The YUV model adopted here follows M/PAL values (see _, Table 2, items 2.5 and 2.6). Args: image: RGB Image to be converted to YUV with shape :math:. Returns: YUV version of the image with shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_yuv(input) # 2x3x4x5",
    "type": "function",
    "file_path": "kornia\\kornia\\color\\yuv.py",
    "ast_data": "FunctionDef name:rgb_to_yuv arg:image arguments arg If Call Raise Call Call If BoolOp Compare Call Compare Raise Call Call Return return:yes"
  },
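A quick check of the channel weights used in the conversion above; the input tensors are arbitrary:

```python
import torch
from kornia.color import rgb_to_yuv

img = torch.rand(2, 3, 4, 5)          # (B, 3, H, W) in [0, 1]
print(rgb_to_yuv(img).shape)          # torch.Size([2, 3, 4, 5])

# Sanity check: pure white maps to Y=1, U=V=0, since the Y weights sum
# to 1.0 and the U/V weights each sum to 0.0.
white = torch.ones(1, 3, 1, 1)
print(rgb_to_yuv(white).flatten())    # ~[1., 0., 0.]
```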
  {
    "library": "django",
    "name": "set_available_apps",
    "source_code": "def set_available_apps(self, available):\n    available = set(available)\n    installed = {app_config.name for app_config in self.get_app_configs()}\n    if not available.issubset(installed):\n        raise ValueError(\"Available apps isn't a subset of installed apps, extra apps: %s\" % ', '.join(available - installed))\n    self.stored_app_configs.append(self.app_configs)\n    self.app_configs = {label: app_config for label, app_config in self.app_configs.items() if app_config.name in available}\n    self.clear_cache()",
    "docstring": "Restrict the set of installed apps used by get_app_config[s]. available must be an iterable of application names. set_available_apps() must be balanced with unset_available_apps(). Primarily used for performance optimization in TransactionTestCase. This method is safe in the sense that it doesn't trigger any imports.",
    "type": "method",
    "file_path": "django\\django\\apps\\registry.py",
    "ast_data": "FunctionDef name:set_available_apps arg:self arg:available arguments arg arg Assign Call Assign Call If Call Raise Call Call Call Assign Call Compare Call"
  },
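A balanced-usage sketch mirroring the docstring's requirement that the call be paired with `unset_available_apps()`; this assumes a configured Django settings module with the listed apps installed:

```python
from django.apps import apps

# Assumes django.setup() has run and these apps are in INSTALLED_APPS.
apps.set_available_apps(["django.contrib.auth", "django.contrib.contenttypes"])
try:
    print([cfg.label for cfg in apps.get_app_configs()])
finally:
    apps.unset_available_apps()   # restore the previous app_configs
```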
  {
    "library": "tensorflow",
    "name": "Constraint",
    "source_code": "class Constraint:\n\n    def __call__(self, w):\n        return w\n\n    def get_config(self):\n        return {}",
    "docstring": "Base class for weight constraints. A instance works like a stateless function. Users who subclass this class should override the method, which takes a single weight parameter and return a projected version of that parameter (e.g. normalized or clipped). Constraints can be used with various Keras layers via the or arguments. Here's a simple example of a non-negative weight constraint: >>> class NonNegative(tf.keras.constraints.Constraint): ... ... def __call__(self, w): ... return w * tf.cast(tf.math.greater_equal(w, 0.), w.dtype) >>> weight = tf.constant((-1.0, 1.0)) >>> NonNegative()(weight) >>> tf.keras.layers.Dense(4, kernel_constraint=NonNegative())",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "ClassDef name:Constraint FunctionDef name:__call__ arg:self arg:w arguments arg arg Return return:yes FunctionDef name:get_config arg:self arguments arg Return return:no"
  },
  {
    "library": "django",
    "name": "smart_str",
    "source_code": "def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):\n    if isinstance(s, Promise):\n        return s\n    return force_str(s, encoding, strings_only, errors)",
    "docstring": "Return a string representing 's'. Treat bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects.",
    "type": "function",
    "file_path": "django\\django\\utils\\encoding.py",
    "ast_data": "FunctionDef name:smart_str arg:s arg:encoding arg:strings_only arg:errors arguments arg arg arg arg If Call Return return:yes Return return:yes Call"
  },
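A few calls exercising the branches of `smart_str`/`force_str` described above (the `Promise` branch matters for lazy translation strings):

```python
from django.utils.encoding import smart_str

print(smart_str(b"caf\xc3\xa9"))          # 'café' — bytes decoded as UTF-8
print(smart_str(42))                       # '42'  — coerced by force_str
print(smart_str(42, strings_only=True))    # 42    — protected types pass through
```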
  {
    "library": "scikit-learn",
    "name": "fit",
    "source_code": "@_fit_context(prefer_skip_nested_validation=True)\ndef fit(self, X, y, sample_weight=None):\n    return self._fit(X, y, sample_weight=sample_weight, incremental=False)",
    "docstring": "Fit the model to data matrix X and target(s) y. Parameters ---------- X : ndarray or sparse matrix of shape (n_samples, n_features) The input data. y : ndarray of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels in classification, real numbers in regression). sample_weight : array-like of shape (n_samples,), default=None Sample weights. .. versionadded:: 1.7 Returns ------- self : object Returns a trained MLP model.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\neural_network\\_multilayer_perceptron.py",
    "ast_data": "FunctionDef name:fit arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Return return:yes Call Call"
  },
  {
    "library": "tensorflow",
    "name": "truncated_normal",
    "source_code": "def truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, name=None):\n    with ops.name_scope(name, 'truncated_normal', [shape, mean, stddev]) as name:\n        shape_tensor = _shape_tensor(shape)\n        mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name='mean')\n        stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name='stddev')\n        rnd = self._truncated_normal(shape_tensor, dtype=dtype)\n        mul = rnd * stddev_tensor\n        return math_ops.add(mul, mean_tensor, name=name)",
    "docstring": "Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. mean: A 0-D Tensor or Python value of type . The mean of the truncated normal distribution. stddev: A 0-D Tensor or Python value of type . The standard deviation of the normal distribution, before truncation. dtype: The type of the output. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py",
    "ast_data": "FunctionDef name:truncated_normal arg:self arg:shape arg:mean arg:stddev arg:dtype arg:name arguments arg arg arg arg arg arg With Call Assign Call Assign Call Assign Call Assign Call Assign Return return:yes Call"
  },
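A short demonstration of the truncation property via the public generator API; seed and sample count are arbitrary:

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(42)
samples = g.truncated_normal([10_000], mean=0.0, stddev=1.0)
# Values beyond 2 standard deviations are dropped and re-picked,
# so every magnitude stays within 2 * stddev of the mean.
print(bool(tf.reduce_max(tf.abs(samples)) <= 2.0))  # True
```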
  {
    "library": "tensorflow",
    "name": "_execute_callbacks",
    "source_code": "def _execute_callbacks(callbacks, save_path):\n    for callback in callbacks:\n        num_params = len(inspect.signature(callback).parameters)\n        if num_params == 0:\n            callback()\n        elif num_params == 1:\n            callback(save_path)\n        else:\n            raise AssertionError(f'Callback functions for checkpoint are required to have 0 or 1parameters, but this has {num_params} parameters: {callback}')",
    "docstring": "Executes a list of callback functions, providing if needed.",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py",
    "ast_data": "FunctionDef name:_execute_callbacks arg:callbacks arg:save_path arguments arg arg For Assign Call Call If Compare Call If Compare Call Raise Call"
  },
  {
    "library": "pytorch",
    "name": "_get_layout",
    "source_code": "def _get_layout(name):\n    cache = _get_layout.cache\n    if not cache:\n        for v in torch.__dict__.values():\n            if isinstance(v, torch.layout):\n                cache[str(v)] = v\n    return cache[name]",
    "docstring": "Get layout extension object from its string representation.",
    "type": "function",
    "file_path": "pytorch\\torch\\serialization.py",
    "ast_data": "FunctionDef name:_get_layout arg:name arguments arg Assign If For Call If Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_pretty_print_target",
    "source_code": "@staticmethod\ndef _pretty_print_target(target: object) -> str:\n    if isinstance(target, str):\n        return target\n    if hasattr(target, '__module__'):\n        name = getattr(target, '__name__', None)\n        if name is None:\n            return _get_qualified_name(target)\n        if target.__module__ == 'builtins':\n            return f'builtins.{name}'\n        elif target.__module__ == '_operator':\n            return f'operator.{name}'\n    return _get_qualified_name(target)",
    "docstring": "Make target printouts more user-friendly. 1) builtins will be printed as 2) operators will be printed as 3) other callables will be printed with qualified name, e.g. torch.add",
    "type": "method",
    "file_path": "pytorch\\torch\\fx\\node.py",
    "ast_data": "FunctionDef name:_pretty_print_target arg:target arguments arg If Call Return return:yes If Call Assign Call If Compare Return return:yes Call If Compare Return return:yes If Compare Return return:yes Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "Problem10",
    "source_code": "class Problem10(Benchmark):\n\n    def __init__(self, dimensions=1):\n        Benchmark.__init__(self, dimensions)\n        self._bounds = [(0, 10)]\n        self.global_optimum = 7.9787\n        self.fglob = -7.916727\n\n    def fun(self, x, *args):\n        self.nfev += 1\n        x = x[0]\n        return -x * sin(x)",
    "docstring": "Univariate Problem10 objective function. This class defines the Univariate Problem10 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem10}}(x) = -x\\sin(x) Bound constraints: :math: .. figure:: figures/Problem10.png :alt: Univariate Problem10 function :align: center **Univariate Problem10 function** *Global optimum*: :math: for :math:",
    "type": "class",
    "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py",
    "ast_data": "ClassDef name:Problem10 FunctionDef name:__init__ arg:self arg:dimensions arguments arg arg Call Assign Assign Assign FunctionDef name:fun arg:self arg:x arguments arg arg arg Assign Return return:yes Call"
  },
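The documented optimum can be recovered numerically from the objective in the source above; grid resolution is arbitrary:

```python
import numpy as np

# f(x) = -x * sin(x) on [0, 10]; locate the global minimum on a fine grid.
x = np.linspace(0, 10, 1_000_001)
f = -x * np.sin(x)
i = f.argmin()
print(x[i], f[i])   # ~7.9787, ~-7.916727
```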
  {
    "library": "pytorch",
    "name": "broadcast",
    "source_code": "@_exception_logger\ndef broadcast(tensor: torch.Tensor, src: Optional[int]=None, group: Optional[ProcessGroup]=None, async_op: bool=False, group_src: Optional[int]=None):\n    group = _group_or_default_group(group)\n    group_src = _canonicalize_group_rank(group, src, group_src, return_global=False)\n    _check_single_tensor(tensor, 'tensor')\n    if _rank_not_in_group(group):\n        _warn_not_in_group('broadcast')\n        return\n    opts = BroadcastOptions()\n    opts.rootRank = group_src\n    opts.rootTensor = 0\n    opts.asyncOp = async_op\n    work = group.broadcast([tensor], opts)\n    if async_op:\n        return work\n    elif work is not None:\n        work.wait()",
    "docstring": "Broadcasts the tensor to the whole group. `` but not both. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group",
    "type": "function",
    "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py",
    "ast_data": "FunctionDef name:broadcast arg:tensor arg:src arg:group arg:async_op arg:group_src arguments arg arg arg arg arg Assign Call Assign Call Call If Call Call Return return:no Assign Call Assign Assign Assign Assign Call If Return return:yes If Compare Call"
  },
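A hedged usage sketch of both the synchronous and async paths; it assumes a process group has already been initialized on every rank (e.g. via `dist.init_process_group`), which is outside this snippet:

```python
import torch
import torch.distributed as dist

# Assumes dist.init_process_group(...) has already run on every rank.
def sync_from_rank0(rank: int) -> torch.Tensor:
    t = torch.arange(4.0) if rank == 0 else torch.zeros(4)
    dist.broadcast(t, src=0)                 # in-place; returns None here
    work = dist.broadcast(t, src=0, async_op=True)
    work.wait()                              # async variant returns a handle
    return t                                 # every rank now holds [0., 1., 2., 3.]
```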
  {
    "library": "pytorch",
    "name": "_generate_dict_info",
    "source_code": "def _generate_dict_info(self, model: GraphModule) -> dict[str, Any]:\n    module_dynamic_static_info = {}\n    for fqn, module in model.named_modules():\n        if self._is_supported(module):\n            pre_obs = getattr(module, self.DEFAULT_PRE_OBSERVER_NAME)\n            post_obs = getattr(module, self.DEFAULT_POST_OBSERVER_NAME)\n            pre_stat = pre_obs.get_batch_to_epoch_ratio()\n            post_stat = post_obs.get_batch_to_epoch_ratio()\n            dynamic_recommended = post_stat <= self.tolerance\n            pre_obs_dist_classif = self.STATIONARY_STR if pre_stat > self.tolerance else self.NON_STATIONARY_STR\n            post_obs_dist_classif = self.STATIONARY_STR if post_stat > self.tolerance else self.NON_STATIONARY_STR\n            is_supported_type = any((isinstance(module, x) for x in self.DEFAULT_DYNAMIC_STATIC_CHECK_SUPPORTED))\n            module_info = {self.TOLERANCE_KEY: self.tolerance, self.DEFAULT_DYNAMIC_REC_KEY: dynamic_recommended, self.PRE_OBS_COMP_STAT_KEY: pre_stat, self.PRE_OBS_DATA_DIST_KEY: pre_obs_dist_classif, self.POST_OBS_COMP_STAT_KEY: post_stat, self.POST_OBS_DATA_DIST_KEY: post_obs_dist_classif, self.IS_CURRENTLY_SUPPORTED_KEY: is_supported_type}\n            module_dynamic_static_info[fqn] = module_info\n    return module_dynamic_static_info",
    "docstring": "Helper function for generate_detector_report that does the generation of the dictionary. This process is done as specified in generate_detector_report documentation Args: model (GraphModule): The prepared and calibrated GraphModule with inserted ModelReportObservers Returns a Dictionary mapping modules with ModelReportObservers around them to: whether dynamic quantization is recommended their S metric of input to module whether input to module is stationary or non-stationary their S metric of output of module whether output of module is stationary or non-stationary the tolerance level to decided whether input/output is stationary or non-stationary whether it is currently supported or planned for the future",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py",
    "ast_data": "FunctionDef name:_generate_dict_info arg:self arg:model arguments arg arg Assign For Call If Call Assign Call Assign Call Assign Call Assign Call Assign Compare Assign Compare Assign Compare Assign Call Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_is_in_outmost_while_loop",
    "source_code": "def _is_in_outmost_while_loop(self, op):\n    ctxt = self._get_op_control_flow_context(op)\n    outer_while_context = control_flow_util.GetContainingWhileContext(ctxt)\n    return outer_while_context == control_flow_util.GetContainingWhileContext(self._outmost_context)",
    "docstring": "Returns true if the op is at the same level with the training loop. Returns false if the op is in an inner while loop or if it is outside of the training loop. Args: op: tf.Operation Returns: A boolean.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_is_in_outmost_while_loop arg:self arg:op arguments arg arg Assign Call Assign Call Return return:yes Compare Call"
  },
  {
    "library": "pytorch",
    "name": "baddbmm_flop",
    "source_code": "@register_flop_formula(aten.baddbmm)\ndef baddbmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int:\n    return bmm_flop(a_shape, b_shape)",
    "docstring": "Count flops for the baddbmm operation.",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\flop_counter.py",
    "ast_data": "FunctionDef name:baddbmm_flop arg:self_shape arg:a_shape arg:b_shape arg:out_shape arguments arg arg arg arg arg Return return:yes Call Call"
  },
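The formula above delegates to the `bmm` count, so `baddbmm` is billed as `2 * b * m * k * n` FLOPs with the elementwise add ignored; a quick check via the public counter (shape values are arbitrary):

```python
import torch
from torch.utils.flop_counter import FlopCounterMode

b, m, k, n = 4, 8, 16, 32
inp = torch.randn(b, m, n)
A, B = torch.randn(b, m, k), torch.randn(b, k, n)

with FlopCounterMode(display=False) as counter:
    torch.baddbmm(inp, A, B)

# baddbmm is counted as its underlying bmm: 2 * b * m * k * n FLOPs.
print(counter.get_total_flops(), 2 * b * m * k * n)
```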
  {
    "library": "matplotlib",
    "name": "get_width",
    "source_code": "def get_width(self):\n    return self._width",
    "docstring": "Return the width of the rectangle.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:get_width arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "set_grad_func",
    "source_code": "def set_grad_func(self, grad_func):\n    assert not self._grad_func\n    assert isinstance(grad_func, _DefinedFunction)\n    self._grad_func = grad_func",
    "docstring": "Specifies the gradient function of this function.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:set_grad_func arg:self arg:grad_func arguments arg arg Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "asarray",
    "source_code": "def asarray(obj: Array | bool | int | float | complex | NestedSequence[bool | int | float | complex] | SupportsBufferProtocol, /, *, dtype: Optional[DType]=None, device: Optional[Device]=None, copy: Optional[bool]=None, **kwargs) -> Array:\n    with cp.cuda.Device(device):\n        if copy is None:\n            return cp.asarray(obj, dtype=dtype, **kwargs)\n        else:\n            res = cp.array(obj, dtype=dtype, copy=copy, **kwargs)\n            if not copy and res is not obj:\n                raise ValueError('Unable to avoid copy while creating an array as requested')\n            return res",
    "docstring": "Array API compatibility wrapper for asarray(). See the corresponding documentation in the array library and/or the array API specification for more details.",
    "type": "function",
    "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_aliases.py",
    "ast_data": "FunctionDef name:asarray arguments arg arg arg arg arg With Call If Compare Return return:yes Call Assign Call If BoolOp Compare Raise Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "is_namedtuple",
    "source_code": "def is_namedtuple(instance, strict=False):\n    return _pywrap_utils.IsNamedtuple(instance, strict)",
    "docstring": "Returns True iff is a . Args: instance: An instance of a Python object. strict: If True, is considered to be a only if it is a \"plain\" namedtuple. For instance, a class inheriting from a will be considered to be a iff . Returns: True if is a .",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py",
    "ast_data": "FunctionDef name:is_namedtuple arg:instance arg:strict arguments arg arg Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "parents",
    "source_code": "@property\ndef parents(self):\n    return list(self.keys)",
    "docstring": "See 'FeatureColumn` base class.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py",
    "ast_data": "FunctionDef name:parents arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "from_tuples",
    "source_code": "@classmethod\n@names_compat\ndef from_tuples(cls, tuples: Iterable[tuple[Hashable, ...]], sortorder: int | None=None, names: Sequence[Hashable] | Hashable | None=None) -> MultiIndex:\n    if not is_list_like(tuples):\n        raise TypeError('Input must be a list / sequence of tuple-likes.')\n    if is_iterator(tuples):\n        tuples = list(tuples)\n    tuples = cast(Collection[tuple[Hashable, ...]], tuples)\n    if len(tuples) and all((isinstance(e, tuple) and (not e) for e in tuples)):\n        codes = [np.zeros(len(tuples))]\n        levels = [Index(com.asarray_tuplesafe(tuples, dtype=np.dtype('object')))]\n        return cls(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False)\n    arrays: list[Sequence[Hashable]]\n    if len(tuples) == 0:\n        if names is None:\n            raise TypeError('Cannot infer number of levels from empty list')\n        arrays = [[]] * len(names)\n    elif isinstance(tuples, (np.ndarray, Index)):\n        if isinstance(tuples, Index):\n            tuples = np.asarray(tuples._values)\n        arrays = list(lib.tuples_to_object_array(tuples).T)\n    elif isinstance(tuples, list):\n        arrays = list(lib.to_object_array_tuples(tuples).T)\n    else:\n        arrs = zip_longest(*tuples, fillvalue=np.nan)\n        arrays = cast(list[Sequence[Hashable]], arrs)\n    return cls.from_arrays(arrays, sortorder=sortorder, names=names)",
    "docstring": "Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, \"red\"), (1, \"blue\"), (2, \"red\"), (2, \"blue\")] >>> pd.MultiIndex.from_tuples(tuples, names=(\"number\", \"color\")) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color'])",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\multi.py",
    "ast_data": "FunctionDef name:from_tuples arg:cls arg:tuples arg:sortorder arg:names arguments arg arg arg arg If Call Raise Call If Call Assign Call Assign Call If BoolOp Call Call BoolOp Call Assign Call Call Assign Call Call Call Return return:yes Call If Compare Call If Compare Raise Call Assign Call If Call If Call Assign Call Assign Call Call If Call Assign Call Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scikit-learn",
    "name": "transform",
    "source_code": "@available_if(_search_estimator_has('transform'))\ndef transform(self, X):\n    check_is_fitted(self)\n    return self.best_estimator_.transform(X)",
    "docstring": "Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports `X` transformed in the new space based on the estimator with the best found parameters.",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py",
    "ast_data": "FunctionDef name:transform arg:self arg:X arguments arg arg Call Return return:yes Call Call Call"
  },
  {
    "library": "django",
    "name": "save",
    "source_code": "def save(self, domain_override=None, subject_template_name='registration/password_reset_subject.txt', email_template_name='registration/password_reset_email.html', use_https=False, token_generator=default_token_generator, from_email=None, request=None, html_email_template_name=None, extra_email_context=None):\n    email = self.cleaned_data['email']\n    if not domain_override:\n        current_site = get_current_site(request)\n        site_name = current_site.name\n        domain = current_site.domain\n    else:\n        site_name = domain = domain_override\n    email_field_name = UserModel.get_email_field_name()\n    for user in self.get_users(email):\n        user_email = getattr(user, email_field_name)\n        user_pk_bytes = force_bytes(UserModel._meta.pk.value_to_string(user))\n        context = {'email': user_email, 'domain': domain, 'site_name': site_name, 'uid': urlsafe_base64_encode(user_pk_bytes), 'user': user, 'token': token_generator.make_token(user), 'protocol': 'https' if use_https else 'http', **(extra_email_context or {})}\n        self.send_mail(subject_template_name, email_template_name, context, from_email, user_email, html_email_template_name=html_email_template_name)",
    "docstring": "Generate a one-use only link for resetting password and send it to the user.",
    "type": "method",
    "file_path": "django\\django\\contrib\\auth\\forms.py",
    "ast_data": "FunctionDef name:save arg:self arg:domain_override arg:subject_template_name arg:email_template_name arg:use_https arg:token_generator arg:from_email arg:request arg:html_email_template_name arg:extra_email_context arguments arg arg arg arg arg arg arg arg arg arg Assign If Assign Call Assign Assign Assign Assign Call For Call Assign Call Assign Call Call Assign Call Call BoolOp Call"
  },
  {
    "library": "pytorch",
    "name": "set_per_process_memory_fraction",
    "source_code": "def set_per_process_memory_fraction(fraction, device: 'Device'=None) -> None:\n    _lazy_init()\n    if device is None:\n        device = torch.cuda.current_device()\n    device = _get_device_index(device)\n    if not isinstance(fraction, float):\n        raise TypeError('Invalid type for fraction argument, must be `float`')\n    if fraction < 0 or fraction > 1:\n        raise ValueError(f'Invalid fraction value: {fraction}. Allowed range: 0~1')\n    torch._C._cuda_setMemoryFraction(fraction, device)",
    "docstring": "Set memory fraction for a process. The fraction is used to limit an caching allocator to allocated memory on a CUDA device. The allowed value equals the total visible memory multiplied fraction. If trying to allocate more than the allowed value in a process, will raise an out of memory error in allocator. Args: fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction. device (torch.device or int, optional): selected device. If it is `` the default CUDA device is used. .. note:: In general, the total available free memory is less than the total capacity.",
    "type": "function",
    "file_path": "pytorch\\torch\\cuda\\memory.py",
    "ast_data": "FunctionDef name:set_per_process_memory_fraction arg:fraction arg:device arguments arg arg Call If Compare Assign Call Assign Call If Call Raise Call If BoolOp Compare Compare Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "close",
    "source_code": "def close(self):\n    self.event_writer.close()\n    self._closed = True",
    "docstring": "Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py",
    "ast_data": "FunctionDef name:close arg:self arguments arg Call Assign"
  },
  {
    "library": "scikit-learn",
    "name": "setup",
    "source_code": "def setup(self, X, y, sample_weight):\n    _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)\n    self.loss_value = self.linear_loss.loss(coef=self.coef, X=X, y=y, sample_weight=sample_weight, l2_reg_strength=self.l2_reg_strength, n_threads=self.n_threads, raw_prediction=self.raw_prediction)",
    "docstring": "Precomputations If None, initializes: - self.coef Sets: - self.raw_prediction - self.loss_value",
    "type": "method",
    "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py",
    "ast_data": "FunctionDef name:setup arg:self arg:X arg:y arg:sample_weight arguments arg arg arg arg Assign Call Assign Call"
  },
  {
    "library": "pytorch",
    "name": "step",
    "source_code": "def step(self):\n    if self.record_steps and self.step_rec_fn:\n        self.step_rec_fn.__exit__(None, None, None)\n    prev_action = self.current_action\n    self.step_num += 1\n    self.current_action = self.schedule(self.step_num)\n    self._transit_action(prev_action, self.current_action)\n    if os.environ.get('KINETO_USE_DAEMON', '') or (is_fbcode() and os.environ.get('KINETO_FORCE_STEP_HOOK', '')):\n        prof.KinetoStepTracker.increment_step(PROFILER_STEP_NAME)\n    if self.record_steps:\n        self.step_rec_fn = prof.record_function('ProfilerStep#' + str(self.step_num))\n        self.step_rec_fn.__enter__()",
    "docstring": "Signals the profiler that the next profiling step has started.",
    "type": "method",
    "file_path": "pytorch\\torch\\profiler\\profiler.py",
    "ast_data": "FunctionDef name:step arg:self arguments arg If BoolOp Call Assign Assign Call Call If BoolOp Call BoolOp Call Call Call If Assign Call Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_copy_tick_props",
    "source_code": "def _copy_tick_props(self, src, dest):\n    if src is None or dest is None:\n        return\n    super()._copy_tick_props(src, dest)\n    trans = dest._get_text1_transform()[0]\n    dest.label1.set_transform(trans + dest._text1_translate)\n    trans = dest._get_text2_transform()[0]\n    dest.label2.set_transform(trans + dest._text2_translate)",
    "docstring": "Copy the props from src tick to dest tick.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py",
    "ast_data": "FunctionDef name:_copy_tick_props arg:self arg:src arg:dest arguments arg arg arg If BoolOp Compare Compare Return return:no Call Call Assign Call Call Assign Call Call"
  },
  {
    "library": "pytorch",
    "name": "_create_test",
    "source_code": "def _create_test(bench_op_obj, orig_test_attrs, tags, OperatorTestCase, run_backward, bwd_input):\n    test_attrs = copy.deepcopy(orig_test_attrs)\n    test_attrs = {k: str(v) for k, v in test_attrs.items()}\n    ascii_test_attrs = ast.literal_eval(json.dumps(test_attrs))\n    input_config = str(ascii_test_attrs)[1:-1].replace(\"'\", '')\n    if bwd_input:\n        test_attrs.update({'bwd': bwd_input})\n    test_name = bench_op_obj.test_name(**test_attrs)\n    test_config = TestConfig(test_name, input_config, tags, run_backward)\n    return OperatorTestCase(bench_op_obj, test_config)",
    "docstring": "Create tests with the benchmark backend. Args: bench_op_obj: an object which instantiated from a subclass of TorchBenchmarkBase which includes tensor creation and operator execution. orig_test_attrs: a dictionary includes test configs. tags: a attribute in test config to filter inputs OperatorTestCase: a named tuple to save the metadata of an test run_backward: a bool parameter indicating backward path",
    "type": "function",
    "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_core.py",
    "ast_data": "FunctionDef name:_create_test arg:bench_op_obj arg:orig_test_attrs arg:tags arg:OperatorTestCase arg:run_backward arg:bwd_input arguments arg arg arg arg arg arg Assign Call Assign Call Call Assign Call Call Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "pandas",
    "name": "__array__",
    "source_code": "def __array__(self, dtype=None, copy=None) -> np.ndarray:\n    if copy is None:\n        return np.asarray(self._data, dtype=dtype)\n    return np.array(self._data, dtype=dtype, copy=copy)",
    "docstring": "The array interface, return my values.",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\indexes\\base.py",
    "ast_data": "FunctionDef name:__array__ arg:self arg:dtype arg:copy arguments arg arg arg If Compare Return return:yes Call Return return:yes Call"
  },
  {
    "library": "tensorflow",
    "name": "_export_to_saved_model_graph",
    "source_code": "def _export_to_saved_model_graph(self, object_map=None, tensor_map=None, options=None, **kwargs):\n    new_variable = None\n    if options.experimental_variable_policy._save_variable_devices():\n        with ops.device(self.device):\n            new_variable = copy_to_graph_uninitialized(self)\n    else:\n        new_variable = copy_to_graph_uninitialized(self)\n    object_map[self] = new_variable\n    tensor_map[self.handle] = new_variable.handle\n    return [self.handle]",
    "docstring": "For implementing .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py",
    "ast_data": "FunctionDef name:_export_to_saved_model_graph arg:self arg:object_map arg:tensor_map arg:options arguments arg arg arg arg arg Assign If Call With Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "begin_statement",
    "source_code": "def begin_statement(self, stmt):\n    self.active_stmts.add(stmt)",
    "docstring": "Marks the beginning of a statement. Args: stmt: Hashable, a key by which the statement can be identified in the CFG's stmt_prev and stmt_next attributes",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py",
    "ast_data": "FunctionDef name:begin_statement arg:self arg:stmt arguments arg arg Call"
  },
  {
    "library": "pytorch",
    "name": "get_unsafe_leaves",
    "source_code": "def get_unsafe_leaves(self, model, df, feature_columns):\n    X = df[feature_columns]\n    leaf_ids = model.apply(X)\n    unique_leaves = np.unique(leaf_ids)\n    unsafe_leaves = []\n    for leaf in unique_leaves:\n        leaf_mask = leaf_ids == leaf\n        leaf_X = X[leaf_mask]\n        predicted_config = model.predict(leaf_X.iloc[[0]])[0]\n        for idx, row in leaf_X.iterrows():\n            choice2time = json.loads(df.loc[idx, 'choice2time'])\n            if self.is_unsafe_leaf(row, predicted_config, choice2time):\n                unsafe_leaves.append(leaf)\n                break\n    return unsafe_leaves",
    "docstring": "Given a trained decision tree, and a dataframe containing the training data, returns a list of unsafe leaves.",
    "type": "method",
    "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py",
    "ast_data": "FunctionDef name:get_unsafe_leaves arg:self arg:model arg:df arg:feature_columns arguments arg arg arg arg Assign Assign Call Assign Call Assign For Assign Compare Assign Assign Call For Call Assign Call If Call Call Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "_normalize_image_array",
    "source_code": "@staticmethod\ndef _normalize_image_array(A):\n    A = cbook.safe_masked_invalid(A, copy=True)\n    if A.dtype != np.uint8 and (not np.can_cast(A.dtype, float, 'same_kind')):\n        raise TypeError(f'Image data of dtype {A.dtype} cannot be converted to float')\n    if A.ndim == 3 and A.shape[-1] == 1:\n        A = A.squeeze(-1)\n    if not (A.ndim == 2 or (A.ndim == 3 and A.shape[-1] in [3, 4])):\n        raise TypeError(f'Invalid shape {A.shape} for image data')\n    if A.ndim == 3:\n        high = 255 if np.issubdtype(A.dtype, np.integer) else 1\n        if A.min() < 0 or high < A.max():\n            _log.warning('Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). Got range [%s..%s].', A.min(), A.max())\n            A = np.clip(A, 0, high)\n        if A.dtype != np.uint8 and np.issubdtype(A.dtype, np.integer):\n            A = A.astype(np.uint8)\n    return A",
    "docstring": "Check validity of image-like input *A* and normalize it to a format suitable for Image subclasses.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\image.py",
    "ast_data": "FunctionDef name:_normalize_image_array arg:A arguments arg Assign Call If BoolOp Compare Call Raise Call If BoolOp Compare Compare Assign Call If BoolOp Compare BoolOp Compare Compare Raise Call If Compare Assign Call If BoolOp Compare Call Compare Call Call Call Call Assign Call If BoolOp Compare Call Assign Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "load",
    "source_code": "def load(name, sources: Union[str, list[str]], extra_cflags=None, extra_cuda_cflags=None, extra_sycl_cflags=None, extra_ldflags=None, extra_include_paths=None, build_directory=None, verbose=False, with_cuda: Optional[bool]=None, with_sycl: Optional[bool]=None, is_python_module=True, is_standalone=False, keep_intermediates=True):\n    return _jit_compile(name, [sources] if isinstance(sources, str) else sources, extra_cflags, extra_cuda_cflags, extra_sycl_cflags, extra_ldflags, extra_include_paths, build_directory or _get_build_directory(name, verbose), verbose, with_cuda, with_sycl, is_python_module, is_standalone, keep_intermediates=keep_intermediates)",
    "docstring": "Load a PyTorch C++ extension just-in-time (JIT). To load an extension, a Ninja build file is emitted, which is used to compile the given sources into a dynamic library. This library is subsequently loaded into the current Python process as a module and returned from this function, ready for use. By default, the directory to which the build file is emitted and the resulting library compiled to is ` the name of the extension. This location can be overridden in two ways. First, if the `True`True``. Return the path to the executable. (On Windows, TORCH_LIB_PATH is added to the PATH environment variable as a side effect.) Example: >>> # xdoctest: +SKIP >>> from torch.utils.cpp_extension import load >>> module = load( ... name='extension', ... sources=['extension.cpp', 'extension_kernel.cu'], ... extra_cflags=['-O2'], ... verbose=True)",
    "type": "function",
    "file_path": "pytorch\\torch\\utils\\cpp_extension.py",
    "ast_data": "FunctionDef name:load arg:name arg:sources arg:extra_cflags arg:extra_cuda_cflags arg:extra_sycl_cflags arg:extra_ldflags arg:extra_include_paths arg:build_directory arg:verbose arg:with_cuda arg:with_sycl arg:is_python_module arg:is_standalone arg:keep_intermediates arguments arg arg arg arg arg arg arg arg arg arg arg arg arg arg Return return:yes Call Call BoolOp Call"
  },
  {
    "library": "matplotlib",
    "name": "__call__",
    "source_code": "def __call__(self, value, clip=None):\n    if clip is None:\n        clip = self.clip\n    result, is_scalar = self.process_value(value)\n    if self.vmin is None or self.vmax is None:\n        self.autoscale_None(result)\n    (vmin,), _ = self.process_value(self.vmin)\n    (vmax,), _ = self.process_value(self.vmax)\n    if vmin == vmax:\n        result.fill(0)\n    elif vmin > vmax:\n        raise ValueError('minvalue must be less than or equal to maxvalue')\n    else:\n        if clip:\n            mask = np.ma.getmask(result)\n            result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), mask=mask)\n        resdat = result.data\n        resdat -= vmin\n        resdat /= vmax - vmin\n        result = np.ma.array(resdat, mask=result.mask, copy=False)\n    if is_scalar:\n        result = result[0]\n    return result",
    "docstring": "Normalize the data and return the normalized data. Parameters ---------- value Data to normalize. clip : bool, optional See the description of the parameter *clip* in . If ``.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\colors.py",
    "ast_data": "FunctionDef name:__call__ arg:self arg:value arg:clip arguments arg arg arg If Compare Assign Assign Call If BoolOp Compare Compare Call Assign Call Assign Call If Compare Call If Compare Raise Call If Assign Call Assign Call Call Call Assign Assign Call If Assign Return return:yes"
  },
  {
    "library": "matplotlib",
    "name": "BracketAB",
    "source_code": "@_register_style(_style_list, name=']-[')\nclass BracketAB(_Curve):\n    arrow = ']-['\n\n    def __init__(self, widthA=1.0, lengthA=0.2, angleA=0, widthB=1.0, lengthB=0.2, angleB=0):\n        super().__init__(widthA=widthA, lengthA=lengthA, angleA=angleA, widthB=widthB, lengthB=lengthB, angleB=angleB)",
    "docstring": "An arrow with outward square brackets at both ends.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "ClassDef name:BracketAB Assign FunctionDef name:__init__ arg:self arg:widthA arg:lengthA arg:angleA arg:widthB arg:lengthB arg:angleB arguments arg arg arg arg arg arg arg Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "element_spec",
    "source_code": "@abc.abstractproperty\ndef element_spec(self):\n    raise NotImplementedError(f'{type(self)}.element_spec()')",
    "docstring": "The type specification of an element of this dataset. >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3]) >>> dataset.element_spec TensorSpec(shape=(), dtype=tf.int32, name=None) For more information, read [this guide]( Returns: A (nested) structure of objects matching the structure of an element of this dataset and specifying the type of individual components.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py",
    "ast_data": "FunctionDef name:element_spec arg:self arguments arg Raise Call Call"
  },
  {
    "library": "tensorflow",
    "name": "get_per_replica_batch_size",
    "source_code": "def get_per_replica_batch_size(self, global_batch_size):\n    if global_batch_size % self._num_replicas_in_sync != 0:\n        raise ValueError('The `global_batch_size` %r is not divisible by `num_replicas_in_sync` %r ' % (global_batch_size, self._num_replicas_in_sync))\n    return global_batch_size // self._num_replicas_in_sync",
    "docstring": "Returns the per-replica batch size. Args: global_batch_size: the global batch size which should be divisible by . Returns: the per-replica batch size. Raises: ValueError: if not divisible by .",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py",
    "ast_data": "FunctionDef name:get_per_replica_batch_size arg:self arg:global_batch_size arguments arg arg If Compare Raise Call Return return:yes"
  },
  {
    "library": "scipy",
    "name": "eigenvalues",
    "source_code": "def eigenvalues(self, m=None):\n    eigenvalues, _ = self._eigenvalue_ordering(m)\n    return eigenvalues",
    "docstring": "Return the requested number of eigenvalues. Parameters ---------- m : int, optional The positive number of smallest eigenvalues to return. If not provided, then all eigenvalues will be returned. Returns ------- eigenvalues : float array The requested smallest or all eigenvalues, in ascending order.",
    "type": "method",
    "file_path": "scipy\\scipy\\sparse\\linalg\\_special_sparse_arrays.py",
    "ast_data": "FunctionDef name:eigenvalues arg:self arg:m arguments arg arg Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_use_tensor_values_cache",
    "source_code": "def _use_tensor_values_cache(self):\n    return self._parameters.use_compact_trace",
    "docstring": "Returns True if immediate tensors should be first saved to a cache.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:_use_tensor_values_cache arg:self arguments arg Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "SqlDatasetV2",
    "source_code": "@tf_export('data.experimental.SqlDataset', v1=[])\nclass SqlDatasetV2(dataset_ops.DatasetSource):\n\n    def __init__(self, driver_name, data_source_name, query, output_types):\n        self._driver_name = ops.convert_to_tensor(driver_name, dtype=dtypes.string, name='driver_name')\n        self._data_source_name = ops.convert_to_tensor(data_source_name, dtype=dtypes.string, name='data_source_name')\n        self._query = ops.convert_to_tensor(query, dtype=dtypes.string, name='query')\n        self._element_spec = nest.map_structure(lambda dtype: tensor_spec.TensorSpec([], dtype), output_types)\n        variant_tensor = gen_experimental_dataset_ops.sql_dataset(self._driver_name, self._data_source_name, self._query, **self._flat_structure)\n        super(SqlDatasetV2, self).__init__(variant_tensor)\n\n    @property\n    def element_spec(self):\n        return self._element_spec",
    "docstring": "A consisting of the results from a SQL query. allows a user to read data from the result set of a SQL query. For example:",
    "type": "class",
    "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\readers.py",
    "ast_data": "ClassDef name:SqlDatasetV2 FunctionDef name:__init__ arg:self arg:driver_name arg:data_source_name arg:query arg:output_types arguments arg arg arg arg arg Assign Call Assign Call Assign Call Assign Call arguments arg Call Assign Call Call Call FunctionDef name:element_spec arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "maxpool2d_inference_rule",
    "source_code": "@register_inference_rule(torch.nn.MaxPool2d)\ndef maxpool2d_inference_rule(n: Node, module_instance):\n    assert isinstance(n.args[0], Node)\n    if n.args[0].type == Dyn and isinstance(n.type, TensorType):\n        n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__))\n    if isinstance(n.args[0].type, TensorType):\n        output = maxpool2d_check(n.args[0].type, module_instance)\n        n.type = get_greatest_upper_bound(output, n.type)\n    return n.type",
    "docstring": "Given a MaxPool2D instance and a node check the following conditions: - Input size matches size 3 or 4 - Current node type is consistent with the output type we will calculate - Input size matches output size and the last two dimensions of the output are w_out and h_out. The remaining dimensions are the same as the input - Our final result is the greatest upper bound of the output we calculate and the current node type.",
    "type": "function",
    "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py",
    "ast_data": "FunctionDef name:maxpool2d_inference_rule arg:n arg:module_instance arguments arg arg Call If BoolOp Compare Call Assign Call Call If Call Assign Call Assign Call Return return:yes Call"
  },
  {
    "library": "scipy",
    "name": "accept_test",
    "source_code": "def accept_test(self, x_new=None, *args, **kwargs):\n    if not hasattr(self.function, 'xmin'):\n        return True\n    if np.any(x_new < self.function.xmin):\n        return False\n    if np.any(x_new > self.function.xmax):\n        return False\n    return True",
    "docstring": "Does the new candidate vector lie in between the bounds? Returns ------- accept_test : bool The candidate vector lies in between the bounds",
    "type": "method",
    "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py",
    "ast_data": "FunctionDef name:accept_test arg:self arg:x_new arguments arg arg arg arg If Call Return return:yes If Call Compare Return return:yes If Call Compare Return return:yes Return return:yes"
  },
  {
    "library": "numpy",
    "name": "power",
    "source_code": "@set_module('numpy.lib.scimath')\n@array_function_dispatch(_power_dispatcher)\ndef power(x, p):\n    x = _fix_real_lt_zero(x)\n    p = _fix_int_lt_zero(p)\n    return nx.power(x, p)",
    "docstring": "Return x to the power p, (x**p). If contains negative values, the output is converted to the complex domain. Parameters ---------- x : array_like The input value(s). p : array_like of ints The power(s) to which is raised. If contains multiple values, has to either be a scalar, or contain the same number of values as . In the latter case, the result is `xpout`, otherwise an array is returned. See Also -------- numpy.power Examples -------- >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.power(2, 2) 4 >>> np.emath.power([2, 4], 2) array([ 4, 16]) >>> np.emath.power([2, 4], -2) array([0.25 , 0.0625]) >>> np.emath.power([-2, 4], 2) array([ 4.-0.j, 16.+0.j]) >>> np.emath.power([2, 4], [2, 4]) array([ 4, 256])",
    "type": "function",
    "file_path": "numpy\\numpy\\lib\\_scimath_impl.py",
    "ast_data": "FunctionDef name:power arg:x arg:p arguments arg arg Assign Call Assign Call Return return:yes Call Call Call"
  },
  {
    "library": "tensorflow",
    "name": "trace_cpu",
    "source_code": "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n    if isinstance(graph, func_graph.FuncGraph) or isinstance(graph, function._FuncGraph):\n        logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. Ignoring tracing.')\n        return tensor_fetches\n    if graph in TensorTracer._traced_graphs:\n        logging.warning('Graph is already rewritten with tensor tracer, ignoring multiple calls.')\n        return tensor_fetches\n    else:\n        TensorTracer._traced_graphs.add(graph)\n    self._parameters = tensor_tracer_flags.TTParameters()\n    self._tt_config.device_type = _DEVICE_TYPE_CPU\n    self._tt_config.num_replicas = 1\n    self._tt_config.num_replicas_per_host = 1\n    self._tt_config.num_hosts = 1\n    self._replica_id = 0\n    if self._parameters.graph_dump_path:\n        graph_io.write_graph(graph, self._parameters.graph_dump_path, 'graph_before_tt.pbtxt')\n    with graph.as_default():\n        tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches, on_tpu=False)\n    if self._parameters.graph_dump_path:\n        graph_io.write_graph(graph, self._parameters.graph_dump_path, 'graph_after_tt.pbtxt')\n    return tensor_fetches",
    "docstring": "Traces the tensors generated by CPU Ops in a TF graph. Args: graph: the graph of Ops executed on the CPU. tensor_fetches: a (list,tuple,or a single object) of tensor fetches returned by model_fn given to session.run. Function must be provided with as least one tensor to fetch. op_fetches: A list of op fetches returned by model_fn given to session.run. op_fetches and tensor_fetches are used to determine the nodes that will be executed. Can be None. Returns: tensor_fetches: an exact copy of tensor_fetches that has additional dependencies.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py",
    "ast_data": "FunctionDef name:trace_cpu arg:self arg:graph arg:tensor_fetches arg:op_fetches arguments arg arg arg arg If BoolOp Call Call Call Return return:yes If Compare Call Return return:yes Call Assign Call Assign Assign Assign Assign Assign If Call With Call Assign Call If Call Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "set_qat_module",
    "source_code": "def set_qat_module(self, qat_module: type[torch.nn.Module]) -> BackendPatternConfig:\n    self.qat_module = qat_module\n    return self",
    "docstring": "Set the module that represents the QAT implementation for this pattern.",
    "type": "method",
    "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py",
    "ast_data": "FunctionDef name:set_qat_module arg:self arg:qat_module arguments arg arg Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_get_as_operand",
    "source_code": "def _get_as_operand(self):\n    return self._get()",
    "docstring": "Returns the value for operations for the current device. Some implementations, e.g. , are not able to return the value type within a replica context. They can, however, return a value that can be used by the operations below.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py",
    "ast_data": "FunctionDef name:_get_as_operand arg:self arguments arg Return return:yes Call"
  },
  {
    "library": "pytorch",
    "name": "compile",
    "source_code": "def compile(self, *args, **kwargs):\n    self._compiled_call_impl = torch.compile(self._call_impl, *args, **kwargs)",
    "docstring": "Compile this Module's forward using :func:. This Module's method is compiled and all arguments are passed as-is to :func:. See :func: for details on the arguments for this function.",
    "type": "method",
    "file_path": "pytorch\\torch\\nn\\modules\\module.py",
    "ast_data": "FunctionDef name:compile arg:self arguments arg arg arg Assign Call"
  },
  {
    "library": "django",
    "name": "__iter__",
    "source_code": "def __iter__(self):\n    for i in range(self.size):\n        yield self[i]",
    "docstring": "Iterate over each point in the coordinate sequence.",
    "type": "method",
    "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py",
    "ast_data": "FunctionDef name:__iter__ arg:self arguments arg For Call"
  },
  {
    "library": "pandas",
    "name": "_convert_string_array",
    "source_code": "def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:\n    if len(data):\n        data = Series(data.ravel(), copy=False, dtype='object').str.encode(encoding, errors)._values.reshape(data.shape)\n    ensured = ensure_object(data.ravel())\n    itemsize = max(1, libwriters.max_len_string_array(ensured))\n    data = np.asarray(data, dtype=f'S{itemsize}')\n    return data",
    "docstring": "Take a string-like that is object dtype and coerce to a fixed size string type. Parameters ---------- data : np.ndarray[object] encoding : str errors : str Handler for encoding errors. Returns ------- np.ndarray[fixed-length-string]",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\pytables.py",
    "ast_data": "FunctionDef name:_convert_string_array arg:data arg:encoding arg:errors arguments arg arg arg If Call Assign Call Call Call Call Assign Call Call Assign Call Call Assign Call Return return:yes"
  },
  {
    "library": "cryptography",
    "name": "update",
    "source_code": "@abc.abstractmethod\ndef update(self, data: Buffer) -> None:\n    pass",
    "docstring": "Processes the provided bytes through the hash.",
    "type": "method",
    "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\hashes.py",
    "ast_data": "FunctionDef name:update arg:self arg:data arguments arg arg"
  },
  {
    "library": "tensorflow",
    "name": "_from_definition",
    "source_code": "def _from_definition(fdef, grad_func=None):\n    func = None\n    argnames = [arg.name for arg in fdef.signature.input_arg]\n    input_types = tuple((dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg))\n    func_name = fdef.signature.name\n    python_grad_func = None\n    out_names = [arg.name for arg in fdef.signature.output_arg]\n    result = _DefinedFunction(func, argnames, input_types, func_name, grad_func, python_grad_func, out_names)\n    if is_oss:\n        serialized = fdef.SerializeToString()\n        c_func = c_api.TF_FunctionImportFunctionDef(serialized)\n    else:\n        c_func = c_api.TF_FunctionImportFunctionDefNoSerialization(fdef)\n    result._c_func = c_api_util.ScopedTFFunction(c_func, func_name)\n    result._extra_inputs = []\n    result._op_def = fdef.signature\n    return result",
    "docstring": "Creates a _DefinedFunction initialized from a FunctionDef proto. Args: fdef: a FunctionDef grad_func: a _DefinedFunction or None Returns: A _DefinedFunction representing fdef",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py",
    "ast_data": "FunctionDef name:_from_definition arg:fdef arg:grad_func arguments arg arg Assign Assign Assign Call Call Assign Assign Assign Assign Call If Assign Call Assign Call Assign Call Assign Call Assign Assign Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_maybe_create_per_worker_vars",
    "source_code": "def _maybe_create_per_worker_vars(self):\n    if not self._per_worker_vars:\n        self._per_worker_vars = self._coordinator._create_per_worker_variables(self._var_creator)",
    "docstring": "Create variable on each worker if it hasn't been created.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py",
    "ast_data": "FunctionDef name:_maybe_create_per_worker_vars arg:self arguments arg If Assign Call"
  },
  {
    "library": "matplotlib",
    "name": "SecondLocator",
    "source_code": "class SecondLocator(RRuleLocator):\n\n    def __init__(self, bysecond=None, interval=1, tz=None):\n        if bysecond is None:\n            bysecond = range(60)\n        rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)\n        super().__init__(rule, tz=tz)",
    "docstring": "Make ticks on occurrences of each second.",
    "type": "class",
    "file_path": "matplotlib\\lib\\matplotlib\\dates.py",
    "ast_data": "ClassDef name:SecondLocator FunctionDef name:__init__ arg:self arg:bysecond arg:interval arg:tz arguments arg arg arg arg If Compare Assign Call Assign Call Call Call"
  },
  {
    "library": "pandas",
    "name": "is_type_factory",
    "source_code": "def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:\n\n    def inner(x) -> None:\n        if type(x) != _type:\n            raise ValueError(f\"Value must have type '{_type}'\")\n    return inner",
    "docstring": "Parameters ---------- - a type to be compared against (e.g. type(x) == ) Returns ------- validator - a function of a single argument x , which raises ValueError if type(x) is not equal to",
    "type": "function",
    "file_path": "pandas\\pandas\\_config\\config.py",
    "ast_data": "FunctionDef name:is_type_factory arg:_type arguments arg FunctionDef name:inner arg:x arguments arg If Compare Call Raise Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "get_format_timedelta64",
    "source_code": "def get_format_timedelta64(values: TimedeltaArray, nat_rep: str | float='NaT', box: bool=False) -> Callable:\n    even_days = values._is_dates_only\n    if even_days:\n        format = None\n    else:\n        format = 'long'\n\n    def _formatter(x):\n        if x is None or (is_scalar(x) and isna(x)):\n            return nat_rep\n        if not isinstance(x, Timedelta):\n            x = Timedelta(x)\n        result = x._repr_base(format=format)\n        if box:\n            result = f\"'{result}'\"\n        return result\n    return _formatter",
    "docstring": "Return a formatter function for a range of timedeltas. These will all have the same format argument If box, then show the return in quotes",
    "type": "function",
    "file_path": "pandas\\pandas\\io\\formats\\format.py",
    "ast_data": "FunctionDef name:get_format_timedelta64 arg:values arg:nat_rep arg:box arguments arg arg arg Assign If Assign Assign FunctionDef name:_formatter arg:x arguments arg If BoolOp Compare BoolOp Call Call Return return:yes If Call Assign Call Assign Call If Assign Return return:yes Return return:yes"
  },
  {
    "library": "pytorch",
    "name": "_get_mobile_model_contained_types",
    "source_code": "def _get_mobile_model_contained_types(f_input) -> int:\n    if isinstance(f_input, (str, os.PathLike)):\n        if not os.path.exists(f_input):\n            raise ValueError(f'The provided filename {f_input} does not exist')\n        if os.path.isdir(f_input):\n            raise ValueError(f'The provided filename {f_input} is a directory')\n    if isinstance(f_input, (str, os.PathLike)):\n        return torch._C._get_mobile_model_contained_types(os.fspath(f_input))\n    else:\n        return torch._C._get_mobile_model_contained_types_from_buffer(f_input.read())",
    "docstring": "Take a file-like object and return a set of string, like (\"int\", \"Optional\"). Args: f_input: a file-like object (has to implement read, readline, tell, and seek), or a string containing a file name Returns: type_list: A set of string, like (\"int\", \"Optional\"). These are types used in bytecode. Example: .. testcode:: from torch.jit.mobile import _get_mobile_model_contained_types # Get type list from a saved file path type_list = _get_mobile_model_contained_types(\"path/to/model.ptl\")",
    "type": "function",
    "file_path": "pytorch\\torch\\jit\\mobile\\__init__.py",
    "ast_data": "FunctionDef name:_get_mobile_model_contained_types arg:f_input arguments arg If Call If Call Raise Call If Call Raise Call If Call Return return:yes Call Call Return return:yes Call Call"
  },
  {
    "library": "matplotlib",
    "name": "_update_cursor",
    "source_code": "def _update_cursor(self, event):\n    if self.mode and event.inaxes and event.inaxes.get_navigate():\n        if self.mode == _Mode.ZOOM and self._last_cursor != tools.Cursors.SELECT_REGION:\n            self.canvas.set_cursor(tools.Cursors.SELECT_REGION)\n            self._last_cursor = tools.Cursors.SELECT_REGION\n        elif self.mode == _Mode.PAN and self._last_cursor != tools.Cursors.MOVE:\n            self.canvas.set_cursor(tools.Cursors.MOVE)\n            self._last_cursor = tools.Cursors.MOVE\n    elif self._last_cursor != tools.Cursors.POINTER:\n        self.canvas.set_cursor(tools.Cursors.POINTER)\n        self._last_cursor = tools.Cursors.POINTER",
    "docstring": "Update the cursor after a mouse move event or a tool (de)activation.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py",
    "ast_data": "FunctionDef name:_update_cursor arg:self arg:event arguments arg arg If BoolOp Call If BoolOp Compare Compare Call Assign If BoolOp Compare Compare Call Assign If Compare Call Assign"
  },
  {
    "library": "matplotlib",
    "name": "set_width",
    "source_code": "def set_width(self, w):\n    self._width = w\n    self.stale = True",
    "docstring": "Set the rectangle width. Parameters ---------- w : float",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\patches.py",
    "ast_data": "FunctionDef name:set_width arg:self arg:w arguments arg arg Assign Assign"
  },
  {
    "library": "scipy",
    "name": "lower_border_end",
    "source_code": "@property\ndef lower_border_end(self) -> tuple[int, int]:\n    if self._lower_border_end is not None:\n        return self._lower_border_end\n    m0 = np.flatnonzero(self.win.real ** 2 + self.win.imag ** 2)[0]\n    k0 = -self.m_num_mid + m0\n    for q_, k_ in enumerate(range(k0, self.hop + 1, self.hop)):\n        if k_ + self.hop >= 0:\n            self._lower_border_end = (k_ + self.m_num, q_ + 1)\n            return self._lower_border_end\n    self._lower_border_end = (0, max(self.p_min, 0))\n    return self._lower_border_end",
    "docstring": "First signal index and first slice index unaffected by pre-padding. Describes the point where the window does not stick out to the left of the signal domain. A detailed example is provided in the :ref: section of the :ref:. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. lower_border_end: Where pre-padding effects end. p_min: The smallest possible slice index. p_max: Index of first non-overlapping upper time slice. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. upper_border_begin: Where post-padding effects start. ShortTimeFFT: Class this property belongs to.",
    "type": "method",
    "file_path": "scipy\\scipy\\signal\\_short_time_fft.py",
    "ast_data": "FunctionDef name:lower_border_end arg:self arguments arg If Compare Return return:yes Assign Call Assign For Call Call If Compare Assign Return return:yes Assign Call Return return:yes"
  },
  {
    "library": "tensorflow",
    "name": "_import_config",
    "source_code": "def _import_config(self):\n    if self._config is None:\n        return\n    num_cpus = self._config.device_count.get('CPU', 1)\n    if num_cpus != 1:\n        cpus = [d for d in self._physical_devices if d.device_type == 'CPU']\n        if num_cpus == 0:\n            self.set_visible_devices([], 'CPU')\n        elif num_cpus > 1:\n            self.set_logical_device_configuration(cpus[0], [LogicalDeviceConfiguration() for _ in range(num_cpus)])\n    gpus = [d for d in self._physical_devices if d.device_type == 'GPU']\n    if not gpus:\n        return\n    gpu_count = self._config.device_count.get('GPU', None)\n    visible_gpus = []\n    visible_indices = self._config.gpu_options.visible_device_list\n    if visible_indices:\n        for index in visible_indices.split(','):\n            if int(index) >= len(gpus):\n                raise ValueError('Invalid visible device index: %s' % index)\n            visible_gpus.append(gpus[int(index)])\n    else:\n        visible_gpus = gpus\n    if gpu_count is not None:\n        visible_gpus = visible_gpus[:gpu_count]\n    self.set_visible_devices(visible_gpus, 'GPU')",
    "docstring": "Import config if passed in during construction. If Context was created with a ConfigProto such as when calling tf.compat.v1.enable_eager_execution(), then we need to pull out the various pieces we might be replacing and import then into our internal class representation.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py",
    "ast_data": "FunctionDef name:_import_config arg:self arguments arg If Compare Return return:no Assign Call If Compare Assign Compare If Compare Call If Compare Call Call Call Assign Compare If Return return:no Assign Call Assign Assign If For Call If Compare Call Call Raise Call Call Call Assign If Compare Assign Call"
  },
  {
    "library": "tensorflow",
    "name": "get_config",
    "source_code": "def get_config(self):\n    return {}",
    "docstring": "Returns a Python dict of the object config. A constraint config is a Python dictionary (JSON-serializable) that can be used to reinstantiate the same object. Returns: Python dict containing the configuration of the constraint object.",
    "type": "method",
    "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py",
    "ast_data": "FunctionDef name:get_config arg:self arguments arg Return return:no"
  },
  {
    "library": "tensorflow",
    "name": "_gen_save_and_restore_functions",
    "source_code": "def _gen_save_and_restore_functions(checkpoint_factory_map: object_identity.ObjectIdentityDictionary) -> object_identity.ObjectIdentityDictionary:\n    saveable_fn_map = object_identity.ObjectIdentityDictionary()\n    for obj, factory_data_list in checkpoint_factory_map.items():\n        if resource_variable_ops.is_resource_variable(obj) or not factory_data_list:\n            continue\n        if factory_data_list[0].name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:\n            assert len(factory_data_list) == 1\n            saveable_fn_map[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: tracing_utils.trace_save_and_restore(obj)}\n        else:\n            saveable_fn_map[obj] = trace_saveable_util.trace_save_restore_function_map(obj, factory_data_list)\n    return saveable_fn_map",
    "docstring": "Generates global and individual save/restore concrete functions. The global functions records the ops to save and restore the entire object to a file prefix, while the individual functions save and restore value tensors for resources. This function is intended to run on the output of , which returns the generated a map of . Args: checkpoint_factory_map: A dictionary mapping trackable objects to a list of . Returns: Tuple of ( saveable_fn_map: Maps obj -> factory name -> (concrete save, restore) )",
    "type": "function",
    "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py",
    "ast_data": "FunctionDef name:_gen_save_and_restore_functions arg:checkpoint_factory_map arguments arg Assign Call For Call If BoolOp Call If Compare Compare Call Assign Call Assign Call Return return:yes"
  },
  {
    "library": "pandas",
    "name": "add",
    "source_code": "def add(self, other, level=None, fill_value=None, axis: Axis=0) -> Series:\n    return self._flex_method(other, operator.add, level=level, fill_value=fill_value, axis=axis)",
    "docstring": "Return Addition of series and other, element-wise (binary operator ). Equivalent to `Python documentation `_ for more details. Examples -------- >>> a = pd.Series([1, 1, 1, np.nan], index=[\"a\", \"b\", \"c\", \"d\"]) >>> a a 1.0 b 1.0 c 1.0 d NaN dtype: float64 >>> b = pd.Series([1, np.nan, 1, np.nan], index=[\"a\", \"b\", \"d\", \"e\"]) >>> b a 1.0 b NaN d 1.0 e NaN dtype: float64 >>> a.add(b, fill_value=0) a 2.0 b 1.0 c 1.0 d 1.0 e NaN dtype: float64",
    "type": "method",
    "file_path": "pandas\\pandas\\core\\series.py",
    "ast_data": "FunctionDef name:add arg:self arg:other arg:level arg:fill_value arg:axis arguments arg arg arg arg arg Return return:yes Call"
  },
  {
    "library": "matplotlib",
    "name": "__init__",
    "source_code": "def __init__(self, a, b, **kwargs):\n    if a.output_dims != b.input_dims:\n        raise ValueError(\"The output dimension of 'a' must be equal to the input dimensions of 'b'\")\n    self.input_dims = a.input_dims\n    self.output_dims = b.output_dims\n    super().__init__(**kwargs)\n    self._a = a\n    self._b = b\n    self.set_children(a, b)",
    "docstring": "Create a new composite transform that is the result of applying transform *a* then transform *b*. You will generally not call this constructor directly but write `` instead, which will automatically choose the best kind of composite transform instance to create.",
    "type": "method",
    "file_path": "matplotlib\\lib\\matplotlib\\transforms.py",
    "ast_data": "FunctionDef name:__init__ arg:self arg:a arg:b arguments arg arg arg arg If Compare Raise Call Assign Assign Call Call Assign Assign Call"
  }
]